Compare commits


6 Commits

Author            SHA1        Message                        Date
Juan Font Alonso  07f6bad067  Docs update                    2022-09-02 19:20:41 +02:00
Juan Font Alonso  9c8131b967  Updated changelog              2022-09-02 19:19:10 +02:00
Juan Font Alonso  3454e03331  Release related stuff to 1.19  2022-09-02 19:16:43 +02:00
Juan Font Alonso  e84bda4fe6  Move Dockerfiles to 1.19       2022-09-02 19:13:15 +02:00
Juan Font Alonso  f4152a166e  Target go 1.19 in go.mod       2022-09-02 19:12:41 +02:00
Juan Font Alonso  f3b99e31e4  Minor change in go.mod         2022-09-02 09:23:02 +02:00
120 changed files with 2893 additions and 7052 deletions

.github/FUNDING.yml

@@ -0,0 +1,2 @@
ko_fi: kradalby
github: [kradalby]


@@ -13,13 +13,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
uses: tj-actions/changed-files@v14.1
with:
files: |
*.nix


@@ -9,7 +9,7 @@ jobs:
add-contributors:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
- name: Delete upstream contributor branch
# Allow continue on failure to account for when the
# upstream branch is deleted or does not exist.


@@ -1,5 +1,5 @@
---
name: Lint
name: CI
on: [push, pull_request]
@@ -7,13 +7,13 @@ jobs:
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
uses: tj-actions/changed-files@v14.1
with:
files: |
*.nix
@@ -26,7 +26,7 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
uses: golangci/golangci-lint-action@v2
with:
version: v1.49.0
version: v1.46.1
# Only block PRs on new problems.
# If this is not enabled, we will end up having PRs


@@ -1,5 +1,5 @@
---
name: Release
name: release
on:
push:
@@ -12,11 +12,11 @@ jobs:
runs-on: ubuntu-18.04 # due to CGO we need to user an older version
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v2
with:
go-version: 1.19.0
@@ -37,7 +37,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Docker Buildx
@@ -65,8 +65,8 @@ jobs:
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest
type=sha
type=raw,value=develop
- name: Login to DockerHub
uses: docker/login-action@v1
with:
@@ -100,7 +100,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Docker Buildx
@@ -125,13 +125,13 @@ jobs:
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
flavor: |
suffix=-debug,onlatest=true
latest=false
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
type=raw,value=develop
type=semver,pattern={{version}}-debug
type=semver,pattern={{major}}.{{minor}}-debug
type=semver,pattern={{major}}-debug
type=raw,value=latest-debug
type=sha,suffix=-debug
- name: Login to DockerHub
uses: docker/login-action@v1
with:
@@ -161,3 +161,69 @@ jobs:
run: |
rm -rf /tmp/.buildx-cache-debug
mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug
docker-alpine-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set up QEMU for multiple platforms
uses: docker/setup-qemu-action@master
with:
platforms: arm64,amd64
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache-alpine
key: ${{ runner.os }}-buildx-alpine-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-alpine-
- name: Docker meta
id: meta-alpine
uses: docker/metadata-action@v3
with:
# list of Docker images to use as base name for tags
images: |
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
flavor: |
latest=false
tags: |
type=semver,pattern={{version}}-alpine
type=semver,pattern={{major}}.{{minor}}-alpine
type=semver,pattern={{major}}-alpine
type=raw,value=latest-alpine
type=sha,suffix=-alpine
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
push: true
context: .
file: Dockerfile.alpine
tags: ${{ steps.meta-alpine.outputs.tags }}
labels: ${{ steps.meta-alpine.outputs.labels }}
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache-alpine
cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
build-args: |
VERSION=${{ steps.meta-alpine.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache-alpine
mv /tmp/.buildx-cache-alpine-new /tmp/.buildx-cache-alpine

.github/workflows/renovatebot.yml

@@ -0,0 +1,27 @@
---
name: Renovate
on:
schedule:
- cron: "* * 5,20 * *" # Every 5th and 20th of the month
workflow_dispatch:
jobs:
renovate:
runs-on: ubuntu-latest
steps:
- name: Get token
id: get_token
uses: machine-learning-apps/actions-app-token@master
with:
APP_PEM: ${{ secrets.RENOVATEBOT_SECRET }}
APP_ID: ${{ secrets.RENOVATEBOT_APP_ID }}
- name: Checkout
uses: actions/checkout@v2.0.0
- name: Self-hosted Renovate
uses: renovatebot/github-action@v31.81.3
with:
configurationFile: .github/renovate.json
token: "x-access-token:${{ steps.get_token.outputs.app_token }}"
# env:
# LOG_LEVEL: "debug"


@@ -1,35 +0,0 @@
name: Integration Test CLI
on: [pull_request]
jobs:
integration-test-cli:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_cli


@@ -1,35 +0,0 @@
name: Integration Test DERP
on: [pull_request]
jobs:
integration-test-derp:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run Embedded DERP server integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_derp


@@ -1,27 +0,0 @@
name: Integration Test v2 - kradalby
on: [pull_request]
jobs:
integration-test-v2-kradalby:
runs-on: [self-hosted, linux, x64, nixos, docker]
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_v2_general

.github/workflows/test-integration.yml

@@ -0,0 +1,58 @@
name: CI
on: [pull_request]
jobs:
integration-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration_cli
- name: Run Embedded DERP server integration tests
if: steps.changed-files.outputs.any_changed == 'true'
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration_derp
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
uses: nick-fields/retry@v2
with:
timeout_minutes: 240
max_attempts: 5
retry_on: error
command: nix develop --command -- make test_integration_general


@@ -1,4 +1,4 @@
name: Tests
name: CI
on: [push, pull_request]
@@ -7,13 +7,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v2
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
uses: tj-actions/changed-files@v14.1
with:
files: |
*.nix


@@ -1,8 +1,6 @@
---
run:
timeout: 10m
build-tags:
- ts2019
issues:
skip-dirs:
@@ -28,7 +26,6 @@ linters:
- ireturn
- execinquery
- exhaustruct
- nolintlint
# We should strive to enable these:
- wrapcheck
@@ -36,9 +33,6 @@ linters:
- makezero
- maintidx
# Limits the methods of an interface to 10. We have more in integration tests
- interfacebloat
# We might want to enable this, but it might be a lot of work
- cyclop
- nestif


@@ -20,8 +20,6 @@ builds:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
- id: darwin-arm64
main: ./cmd/headscale/headscale.go
@@ -36,8 +34,6 @@ builds:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
- id: linux-amd64
mod_timestamp: "{{ .CommitTimestamp }}"
@@ -50,8 +46,6 @@ builds:
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
- id: linux-arm64
mod_timestamp: "{{ .CommitTimestamp }}"
@@ -64,8 +58,6 @@ builds:
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
archives:
- id: golang-cross


@@ -2,44 +2,9 @@
## 0.17.0 (2022-XX-XX)
### BREAKING
- Log level option `log_level` was moved to a distinct `log` config section and renamed to `level` [#768](https://github.com/juanfont/headscale/pull/768)
- Removed Alpine Linux container image [#962](https://github.com/juanfont/headscale/pull/962)
### Important Changes
- Added support for Tailscale TS2021 protocol [#738](https://github.com/juanfont/headscale/pull/738)
- Add experimental support for [SSH ACL](https://tailscale.com/kb/1018/acls/#tailscale-ssh) (see docs for limitations) [#847](https://github.com/juanfont/headscale/pull/847)
- Please note that this support should be considered _partially_ implemented
- SSH ACLs status:
- Support `accept` and `check` (SSH can be enabled and used for connecting and authentication)
- Rejecting connections **are not supported**, meaning that if you enable SSH, then assume that _all_ `ssh` connections **will be allowed**.
- If you decied to try this feature, please carefully managed permissions by blocking port `22` with regular ACLs or do _not_ set `--ssh` on your clients.
- We are currently improving our testing of the SSH ACLs, help us get an overview by testing and giving feedback.
- This feature should be considered dangerous and it is disabled by default. Enable by setting `HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1`.
### Changes
- Add ability to specify config location via env var `HEADSCALE_CONFIG` [#674](https://github.com/juanfont/headscale/issues/674)
- Target Go 1.19 for Headscale [#778](https://github.com/juanfont/headscale/pull/778)
- Target Tailscale v1.30.0 to build Headscale [#780](https://github.com/juanfont/headscale/pull/780)
- Give a warning when running Headscale with reverse proxy improperly configured for WebSockets [#788](https://github.com/juanfont/headscale/pull/788)
- Fix subnet routers with Primary Routes [#811](https://github.com/juanfont/headscale/pull/811)
- Added support for JSON logs [#653](https://github.com/juanfont/headscale/issues/653)
- Sanitise the node key passed to registration url [#823](https://github.com/juanfont/headscale/pull/823)
- Add support for generating pre-auth keys with tags [#767](https://github.com/juanfont/headscale/pull/767)
- Add support for evaluating `autoApprovers` ACL entries when a machine is registered [#763](https://github.com/juanfont/headscale/pull/763)
- Add config flag to allow Headscale to start if OIDC provider is down [#829](https://github.com/juanfont/headscale/pull/829)
- Fix prefix length comparison bug in AutoApprovers route evaluation [#862](https://github.com/juanfont/headscale/pull/862)
- Random node DNS suffix only applied if names collide in namespace. [#766](https://github.com/juanfont/headscale/issues/766)
- Remove `ip_prefix` configuration option and warning [#899](https://github.com/juanfont/headscale/pull/899)
- Add `dns_config.override_local_dns` option [#905](https://github.com/juanfont/headscale/pull/905)
- Fix some DNS config issues [#660](https://github.com/juanfont/headscale/issues/660)
- Make it possible to disable TS2019 with build flag [#928](https://github.com/juanfont/headscale/pull/928)
- Fix OIDC registration issues [#960](https://github.com/juanfont/headscale/pull/960) and [#971](https://github.com/juanfont/headscale/pull/971)
- Add support for specifying NextDNS DNS-over-HTTPS resolver [#940](https://github.com/juanfont/headscale/pull/940)
- Make more sslmode available for postgresql connection [#927](https://github.com/juanfont/headscale/pull/927)
- Use Go 1.19 for Headscale [#776](https://github.com/juanfont/headscale/pull/776)
## 0.16.4 (2022-08-21)
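
The changelog above gates the experimental SSH ACL support behind the HEADSCALE_EXPERIMENTAL_FEATURE_SSH environment variable, and the acls.go hunk further down reads it through tailscale.com/envknob. Below is a minimal, self-contained sketch of that pattern; only the variable name comes from the diff, the rest is illustrative and not repository code.

    package main

    import (
        "fmt"

        "tailscale.com/envknob"
    )

    // RegisterBool returns a func() bool reporting whether the variable is set to a
    // true value; this mirrors the featureEnableSSH knob visible in the acls.go hunk.
    var featureEnableSSH = envknob.RegisterBool("HEADSCALE_EXPERIMENTAL_FEATURE_SSH")

    func main() {
        if featureEnableSSH() {
            fmt.Println("experimental SSH ACL support enabled")
        } else {
            fmt.Println("SSH ACLs are ignored until HEADSCALE_EXPERIMENTAL_FEATURE_SSH=1 is set")
        }
    }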


@@ -9,7 +9,7 @@ RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

Dockerfile.alpine

@@ -0,0 +1,24 @@
# Builder image
FROM docker.io/golang:1.19.0-alpine AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale
COPY go.mod go.sum /go/src/headscale/
RUN apk add gcc musl-dev
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale
# Production image
FROM docker.io/alpine:latest
COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC
EXPOSE 8080/tcp
CMD ["headscale"]


@@ -9,11 +9,11 @@ RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN test -e /go/bin/headscale
# Debug image
FROM docker.io/golang:1.19.0-bullseye
FROM gcr.io/distroless/base-debian11:debug
COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC


@@ -4,16 +4,14 @@ ARG TAILSCALE_VERSION=*
ARG TAILSCALE_CHANNEL=stable
RUN apt-get update \
&& apt-get install -y gnupg curl ssh \
&& apt-get install -y gnupg curl \
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
&& apt-get update \
&& apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
&& rm -rf /var/lib/apt/lists/*
RUN adduser --shell=/bin/bash ssh-it-user
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN update-ca-certificates


@@ -1,10 +1,9 @@
FROM golang:latest
RUN apt-get update \
&& apt-get install -y ca-certificates dnsutils git iptables ssh \
&& apt-get install -y ca-certificates dnsutils git iptables \
&& rm -rf /var/lib/apt/lists/*
RUN useradd --shell=/bin/bash --create-home ssh-it-user
RUN git clone https://github.com/tailscale/tailscale.git
@@ -19,6 +18,6 @@ RUN cp tailscale /usr/local/bin/
RUN cp tailscaled /usr/local/bin/
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN update-ca-certificates


@@ -10,8 +10,6 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
else
endif
TAGS = -tags ts2019
# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)
@@ -19,44 +17,23 @@ PROTO_SOURCES = $(call rwildcard,,*.proto)
build:
nix build
GOOS=$(GOOS) CGO_ENABLED=0 go build -trimpath $(pieflags) -mod=readonly -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go
dev: lint test build
test:
@go test $(TAGS) -short -coverprofile=coverage.out ./...
@go test -coverprofile=coverage.out ./...
test_integration: test_integration_cli test_integration_derp test_integration_oidc test_integration_v2_general
test_integration: test_integration_cli test_integration_derp test_integration_general
test_integration_cli:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...
go test -failfast -tags integration_cli,integration -timeout 30m -count=1 ./...
test_integration_derp:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...
go test -failfast -tags integration_derp,integration -timeout 30m -count=1 ./...
test_integration_v2_general:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
-v $$PWD:$$PWD -w $$PWD/integration \
-v /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go test $(TAGS) -failfast ./... -timeout 120m -parallel 8
test_integration_general:
go test -failfast -tags integration_general,integration -timeout 30m -count=1 ./...
coverprofile_func:
go tool cover -func=coverage.out
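
The Makefile hunk above replaces the `-run IntegrationCLI`-style test selection with Go build tags (for example `-tags integration_cli,integration`). The following is an illustrative sketch, not repository code, of how a test file can be restricted to such a tag combination; the package, file contents, and test name are assumptions.

    //go:build integration_cli && integration

    package headscale

    import "testing"

    // This test compiles and runs only when both build tags are supplied,
    // e.g. `go test -tags integration_cli,integration ./...` as in the Makefile target.
    func TestCLIIntegrationPlaceholder(t *testing.T) {
        t.Log("integration_cli and integration build tags are active")
    }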

README.md

@@ -1,4 +1,4 @@
![headscale logo](./docs/logo/headscale3_header_stacked_left.png)
# headscale
![ci](https://github.com/juanfont/headscale/actions/workflows/test.yml/badge.svg)
@@ -195,15 +195,6 @@ make build
<sub style="font-size:14px"><b>Jiang Zhu</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/tsujamin>
<img src=https://avatars.githubusercontent.com/u/2435619?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Benjamin Roberts/>
<br />
<sub style="font-size:14px"><b>Benjamin Roberts</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/reynico>
<img src=https://avatars.githubusercontent.com/u/715768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Nico/>
@@ -211,6 +202,8 @@ make build
<sub style="font-size:14px"><b>Nico</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/e-zk>
<img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/>
@@ -246,8 +239,6 @@ make build
<sub style="font-size:14px"><b>ohdearaugustin</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mpldr>
<img src=https://avatars.githubusercontent.com/u/33086936?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Moritz Poldrack/>
@@ -255,6 +246,8 @@ make build
<sub style="font-size:14px"><b>Moritz Poldrack</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/GrigoriyMikhalkin>
<img src=https://avatars.githubusercontent.com/u/3637857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=GrigoriyMikhalkin/>
@@ -262,20 +255,6 @@ make build
<sub style="font-size:14px"><b>GrigoriyMikhalkin</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mike-lloyd03>
<img src=https://avatars.githubusercontent.com/u/49411532?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mike Lloyd/>
<br />
<sub style="font-size:14px"><b>Mike Lloyd</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/iSchluff>
<img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/>
<br />
<sub style="font-size:14px"><b>Anton Schubert</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Niek>
<img src=https://avatars.githubusercontent.com/u/213140?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Niek van der Maas/>
@@ -290,8 +269,6 @@ make build
<sub style="font-size:14px"><b>Eugen Biegler</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/617a7a>
<img src=https://avatars.githubusercontent.com/u/67651251?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azz/>
@@ -299,6 +276,13 @@ make build
<sub style="font-size:14px"><b>Azz</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/iSchluff>
<img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/>
<br />
<sub style="font-size:14px"><b>Anton Schubert</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/qbit>
<img src=https://avatars.githubusercontent.com/u/68368?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aaron Bieber/>
@@ -306,13 +290,8 @@ make build
<sub style="font-size:14px"><b>Aaron Bieber</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/kazauwa>
<img src=https://avatars.githubusercontent.com/u/12330159?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Igor Perepilitsyn/>
<br />
<sub style="font-size:14px"><b>Igor Perepilitsyn</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Aluxima>
<img src=https://avatars.githubusercontent.com/u/16262531?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Laurent Marchaud/>
@@ -327,20 +306,11 @@ make build
<sub style="font-size:14px"><b>Fernando De Lucchi</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/OrvilleQ>
<img src=https://avatars.githubusercontent.com/u/21377465?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Orville Q. Song/>
<br />
<sub style="font-size:14px"><b>Orville Q. Song</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/hdhoang>
<img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hdhoang/>
<img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Hoàng Đức Hiếu/>
<br />
<sub style="font-size:14px"><b>hdhoang</b></sub>
<sub style="font-size:14px"><b>Hoàng Đức Hiếu</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -357,13 +327,6 @@ make build
<sub style="font-size:14px"><b>Deon Thomas</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/madjam002>
<img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/>
<br />
<sub style="font-size:14px"><b>Jamie Greeff</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ChibangLW>
<img src=https://avatars.githubusercontent.com/u/22293464?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ChibangLW/>
@@ -371,6 +334,8 @@ make build
<sub style="font-size:14px"><b>ChibangLW</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mevansam>
<img src=https://avatars.githubusercontent.com/u/403630?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mevan Samaratunga/>
@@ -378,8 +343,6 @@ make build
<sub style="font-size:14px"><b>Mevan Samaratunga</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/dragetd>
<img src=https://avatars.githubusercontent.com/u/3639577?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael G./>
@@ -408,13 +371,6 @@ make build
<sub style="font-size:14px"><b>Stefan Majer</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/kevin1sMe>
<img src=https://avatars.githubusercontent.com/u/6886076?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kevinlin/>
<br />
<sub style="font-size:14px"><b>kevinlin</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/artemklevtsov>
<img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
@@ -431,13 +387,6 @@ make build
<sub style="font-size:14px"><b>Casey Marshall</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/CNLHC>
<img src=https://avatars.githubusercontent.com/u/21005146?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=LiuHanCheng/>
<br />
<sub style="font-size:14px"><b>LiuHanCheng</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/pvinis>
<img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/>
@@ -453,21 +402,12 @@ make build
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/snh>
<img src=https://avatars.githubusercontent.com/u/2051768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Steven Honson/>
<br />
<sub style="font-size:14px"><b>Steven Honson</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ratsclub>
<a href=https://github.com/vtrf>
<img src=https://avatars.githubusercontent.com/u/25647735?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Victor Freire/>
<br />
<sub style="font-size:14px"><b>Victor Freire</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/lachy2849>
<img src=https://avatars.githubusercontent.com/u/98844035?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lachy2849/>
@@ -482,6 +422,8 @@ make build
<sub style="font-size:14px"><b>thomas</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/aberoham>
<img src=https://avatars.githubusercontent.com/u/586805?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Abraham Ingersoll/>
@@ -489,13 +431,6 @@ make build
<sub style="font-size:14px"><b>Abraham Ingersoll</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/puzpuzpuz>
<img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
<br />
<sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/apognu>
<img src=https://avatars.githubusercontent.com/u/3017182?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antoine POPINEAU/>
@@ -510,15 +445,6 @@ make build
<sub style="font-size:14px"><b>Aofei Sheng</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/arnarg>
<img src=https://avatars.githubusercontent.com/u/1291396?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arnar/>
<br />
<sub style="font-size:14px"><b>Arnar</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/awoimbee>
<img src=https://avatars.githubusercontent.com/u/22431493?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arthur Woimbée/>
@@ -540,6 +466,8 @@ make build
<sub style="font-size:14px"><b> Carson Yang</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/kundel>
<img src=https://avatars.githubusercontent.com/u/10158899?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kundel/>
@@ -554,8 +482,6 @@ make build
<sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/felixonmars>
<img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/>
@@ -570,6 +496,13 @@ make build
<sub style="font-size:14px"><b>JJGadgets</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/madjam002>
<img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/>
<br />
<sub style="font-size:14px"><b>Jamie Greeff</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jimt>
<img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
@@ -577,13 +510,8 @@ make build
<sub style="font-size:14px"><b>Jim Tittsler</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ShadowJonathan>
<img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/>
<br />
<sub style="font-size:14px"><b>Jonathan de Jong</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/piec>
<img src=https://avatars.githubusercontent.com/u/781471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pierre Carru/>
@@ -598,8 +526,6 @@ make build
<sub style="font-size:14px"><b>Rasmus Moorats</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/rcursaru>
<img src=https://avatars.githubusercontent.com/u/16259641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=rcursaru/>
@@ -609,9 +535,9 @@ make build
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/renovate-bot>
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mend Renovate/>
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=WhiteSource Renovate/>
<br />
<sub style="font-size:14px"><b>Mend Renovate</b></sub>
<sub style="font-size:14px"><b>WhiteSource Renovate</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -628,13 +554,8 @@ make build
<sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/stefanvanburen>
<img src=https://avatars.githubusercontent.com/u/622527?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan VanBuren/>
<br />
<sub style="font-size:14px"><b>Stefan VanBuren</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/sophware>
<img src=https://avatars.githubusercontent.com/u/41669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sophware/>
@@ -642,8 +563,6 @@ make build
<sub style="font-size:14px"><b>sophware</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/m-tanner-dev0>
<img src=https://avatars.githubusercontent.com/u/97977342?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tanner/>
@@ -672,13 +591,6 @@ make build
<sub style="font-size:14px"><b>Tianon Gravi</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/thetillhoff>
<img src=https://avatars.githubusercontent.com/u/25052289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Till Hoffmann/>
<br />
<sub style="font-size:14px"><b>Till Hoffmann</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/woudsma>
<img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/>
@@ -709,13 +621,6 @@ make build
<sub style="font-size:14px"><b>Zakhar Bessarab</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/zhzy0077>
<img src=https://avatars.githubusercontent.com/u/8717471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhiyuan Zheng/>
<br />
<sub style="font-size:14px"><b>Zhiyuan Zheng</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Bpazy>
<img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/>
@@ -730,8 +635,6 @@ make build
<sub style="font-size:14px"><b>derelm</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/nning>
<img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/>
@@ -739,6 +642,8 @@ make build
<sub style="font-size:14px"><b>henning mueller</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ignoramous>
<img src=https://avatars.githubusercontent.com/u/852289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ignoramous/>
@@ -746,25 +651,11 @@ make build
<sub style="font-size:14px"><b>ignoramous</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/magichuihui>
<img src=https://avatars.githubusercontent.com/u/10866198?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=suhelen/>
<br />
<sub style="font-size:14px"><b>suhelen</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/lion24>
<img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=sharkonet/>
<img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lion24/>
<br />
<sub style="font-size:14px"><b>sharkonet</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/manju-rn>
<img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/>
<br />
<sub style="font-size:14px"><b>manju-rn</b></sub>
<sub style="font-size:14px"><b>lion24</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -774,15 +665,6 @@ make build
<sub style="font-size:14px"><b>pernila</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/phpmalik>
<img src=https://avatars.githubusercontent.com/u/26834645?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=phpmalik/>
<br />
<sub style="font-size:14px"><b>phpmalik</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Wakeful-Cloud>
<img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful-Cloud/>

acls.go

@@ -5,17 +5,15 @@ import (
"errors"
"fmt"
"io"
"net/netip"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/rs/zerolog/log"
"github.com/tailscale/hujson"
"gopkg.in/yaml.v3"
"tailscale.com/envknob"
"inet.af/netaddr"
"tailscale.com/tailcfg"
)
@@ -56,8 +54,6 @@ const (
ProtocolFC = 133 // Fibre Channel
)
var featureEnableSSH = envknob.RegisterBool("HEADSCALE_EXPERIMENTAL_FEATURE_SSH")
// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules.
func (h *Headscale) LoadACLPolicy(path string) error {
log.Debug().
@@ -124,20 +120,6 @@ func (h *Headscale) UpdateACLRules() error {
log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
h.aclRules = rules
if featureEnableSSH() {
sshRules, err := h.generateSSHRules()
if err != nil {
return err
}
log.Trace().Interface("SSH", sshRules).Msg("SSH rules generated")
if h.sshPolicy == nil {
h.sshPolicy = &tailcfg.SSHPolicy{}
}
h.sshPolicy.Rules = sshRules
} else if h.aclPolicy != nil && len(h.aclPolicy.SSHs) > 0 {
log.Info().Msg("SSH ACLs has been defined, but HEADSCALE_EXPERIMENTAL_FEATURE_SSH is not enabled, this is a unstable feature, check docs before activating")
}
return nil
}
@@ -205,111 +187,6 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
return rules, nil
}
func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
rules := []*tailcfg.SSHRule{}
if h.aclPolicy == nil {
return nil, errEmptyPolicy
}
machines, err := h.ListMachines()
if err != nil {
return nil, err
}
acceptAction := tailcfg.SSHAction{
Message: "",
Reject: false,
Accept: true,
SessionDuration: 0,
AllowAgentForwarding: false,
HoldAndDelegate: "",
AllowLocalPortForwarding: true,
}
rejectAction := tailcfg.SSHAction{
Message: "",
Reject: true,
Accept: false,
SessionDuration: 0,
AllowAgentForwarding: false,
HoldAndDelegate: "",
AllowLocalPortForwarding: false,
}
for index, sshACL := range h.aclPolicy.SSHs {
action := rejectAction
switch sshACL.Action {
case "accept":
action = acceptAction
case "check":
checkAction, err := sshCheckAction(sshACL.CheckPeriod)
if err != nil {
log.Error().
Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod)
} else {
action = *checkAction
}
default:
log.Error().
Msgf("Error parsing SSH %d, unknown action '%s'", index, sshACL.Action)
return nil, err
}
principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
for innerIndex, rawSrc := range sshACL.Sources {
expandedSrcs, err := expandAlias(
machines,
*h.aclPolicy,
rawSrc,
h.cfg.OIDC.StripEmaildomain,
)
if err != nil {
log.Error().
Msgf("Error parsing SSH %d, Source %d", index, innerIndex)
return nil, err
}
for _, expandedSrc := range expandedSrcs {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: expandedSrc,
})
}
}
userMap := make(map[string]string, len(sshACL.Users))
for _, user := range sshACL.Users {
userMap[user] = "="
}
rules = append(rules, &tailcfg.SSHRule{
RuleExpires: nil,
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
return rules, nil
}
func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
sessionLength, err := time.ParseDuration(duration)
if err != nil {
return nil, err
}
return &tailcfg.SSHAction{
Message: "",
Reject: false,
Accept: true,
SessionDuration: sessionLength,
AllowAgentForwarding: false,
HoldAndDelegate: "",
AllowLocalPortForwarding: true,
}, nil
}
func (h *Headscale) generateACLPolicySrcIP(
machines []Machine,
aclPolicy ACLPolicy,
@@ -517,13 +394,13 @@ func expandAlias(
}
// if alias is an IP
ip, err := netip.ParseAddr(alias)
ip, err := netaddr.ParseIP(alias)
if err == nil {
return []string{ip.String()}, nil
}
// if alias is an CIDR
cidr, err := netip.ParsePrefix(alias)
cidr, err := netaddr.ParseIPPrefix(alias)
if err == nil {
return []string{cidr.String()}, nil
}
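
The expandAlias portion of this hunk swaps the external inet.af/netaddr package for the standard library net/netip when an alias is a bare IP or a CIDR. A small standalone sketch of the two standard-library calls, with illustrative addresses:

    package main

    import (
        "fmt"
        "net/netip"
    )

    func main() {
        // netip.ParseAddr takes the place of netaddr.ParseIP for single-IP aliases.
        if ip, err := netip.ParseAddr("100.64.0.1"); err == nil {
            fmt.Println("IP alias:", ip.String())
        }

        // netip.ParsePrefix takes the place of netaddr.ParseIPPrefix for CIDR aliases.
        if cidr, err := netip.ParsePrefix("100.64.0.0/10"); err == nil {
            fmt.Println("CIDR alias:", cidr.String())
        }
    }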


@@ -7,7 +7,6 @@ import (
"testing"
"gopkg.in/check.v1"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
)
@@ -74,81 +73,6 @@ func (s *Suite) TestInvalidAction(c *check.C) {
c.Assert(errors.Is(err, errInvalidAction), check.Equals, true)
}
func (s *Suite) TestSshRules(c *check.C) {
envknob.Setenv("HEADSCALE_EXPERIMENTAL_FEATURE_SSH", "1")
namespace, err := app.CreateNamespace("user1")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("user1", "testmachine")
c.Assert(err, check.NotNil)
hostInfo := tailcfg.Hostinfo{
OS: "centos",
Hostname: "testmachine",
RequestTags: []string{"tag:test"},
}
machine := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Hostname: "testmachine",
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
NamespaceID: namespace.ID,
RegisterMethod: RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: HostInfo(hostInfo),
}
app.db.Save(&machine)
app.aclPolicy = &ACLPolicy{
Groups: Groups{
"group:test": []string{"user1"},
},
Hosts: Hosts{
"client": netip.PrefixFrom(netip.MustParseAddr("100.64.99.42"), 32),
},
ACLs: []ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
SSHs: []SSH{
{
Action: "accept",
Sources: []string{"group:test"},
Destinations: []string{"client"},
Users: []string{"autogroup:nonroot"},
},
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"client"},
Users: []string{"autogroup:nonroot"},
},
},
}
err = app.UpdateACLRules()
c.Assert(err, check.IsNil)
c.Assert(app.sshPolicy, check.NotNil)
c.Assert(app.sshPolicy.Rules, check.HasLen, 2)
c.Assert(app.sshPolicy.Rules[0].SSHUsers, check.HasLen, 1)
c.Assert(app.sshPolicy.Rules[0].Principals, check.HasLen, 1)
c.Assert(app.sshPolicy.Rules[0].Principals[0].NodeIP, check.Matches, "100.64.0.1")
c.Assert(app.sshPolicy.Rules[1].SSHUsers, check.HasLen, 1)
c.Assert(app.sshPolicy.Rules[1].Principals, check.HasLen, 1)
c.Assert(app.sshPolicy.Rules[1].Principals[0].NodeIP, check.Matches, "*")
}
func (s *Suite) TestInvalidGroupInGroup(c *check.C) {
// this ACL is wrong because the group in Sources sections doesn't exist
app.aclPolicy = &ACLPolicy{
@@ -190,7 +114,7 @@ func (s *Suite) TestValidExpandTagOwnersInSources(c *check.C) {
namespace, err := app.CreateNamespace("user1")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("user1", "testmachine")
@@ -240,7 +164,7 @@ func (s *Suite) TestValidExpandTagOwnersInDestinations(c *check.C) {
namespace, err := app.CreateNamespace("user1")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("user1", "testmachine")
@@ -290,7 +214,7 @@ func (s *Suite) TestInvalidTagValidNamespace(c *check.C) {
namespace, err := app.CreateNamespace("user1")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("user1", "testmachine")
@@ -339,7 +263,7 @@ func (s *Suite) TestValidTagInvalidNamespace(c *check.C) {
namespace, err := app.CreateNamespace("user1")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("user1", "webserver")
@@ -471,7 +395,7 @@ func (s *Suite) TestPortNamespace(c *check.C) {
namespace, err := app.CreateNamespace("testnamespace")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("testnamespace", "testmachine")
@@ -513,7 +437,7 @@ func (s *Suite) TestPortGroup(c *check.C) {
namespace, err := app.CreateNamespace("testnamespace")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("testnamespace", "testmachine")
@@ -901,6 +825,7 @@ func Test_listMachinesInNamespace(t *testing.T) {
}
}
// nolint
func Test_expandAlias(t *testing.T) {
type args struct {
machines []Machine


@@ -11,13 +11,11 @@ import (
// ACLPolicy represents a Tailscale ACL Policy.
type ACLPolicy struct {
Groups Groups `json:"groups" yaml:"groups"`
Hosts Hosts `json:"hosts" yaml:"hosts"`
TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"`
ACLs []ACL `json:"acls" yaml:"acls"`
Tests []ACLTest `json:"tests" yaml:"tests"`
AutoApprovers AutoApprovers `json:"autoApprovers" yaml:"autoApprovers"`
SSHs []SSH `json:"ssh" yaml:"ssh"`
Groups Groups `json:"groups" yaml:"groups"`
Hosts Hosts `json:"hosts" yaml:"hosts"`
TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"`
ACLs []ACL `json:"acls" yaml:"acls"`
Tests []ACLTest `json:"tests" yaml:"tests"`
}
// ACL is a basic rule for the ACL Policy.
@@ -44,23 +42,7 @@ type ACLTest struct {
Deny []string `json:"deny,omitempty" yaml:"deny,omitempty"`
}
// AutoApprovers specify which users (namespaces?), groups or tags have their advertised routes
// or exit node status automatically enabled.
type AutoApprovers struct {
Routes map[string][]string `json:"routes" yaml:"routes"`
ExitNode []string `json:"exitNode" yaml:"exitNode"`
}
// SSH controls who can ssh into which machines.
type SSH struct {
Action string `json:"action" yaml:"action"`
Sources []string `json:"src" yaml:"src"`
Destinations []string `json:"dst" yaml:"dst"`
Users []string `json:"users" yaml:"users"`
CheckPeriod string `json:"checkPeriod,omitempty" yaml:"checkPeriod,omitempty"`
}
// UnmarshalJSON allows to parse the Hosts directly into netip objects.
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects.
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
newHosts := Hosts{}
hostIPPrefixMap := make(map[string]string)
@@ -89,7 +71,7 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error {
return nil
}
// UnmarshalYAML allows to parse the Hosts directly into netip objects.
// UnmarshalYAML allows to parse the Hosts directly into netaddr objects.
func (hosts *Hosts) UnmarshalYAML(data []byte) error {
newHosts := Hosts{}
hostIPPrefixMap := make(map[string]string)
@@ -118,28 +100,3 @@ func (policy ACLPolicy) IsZero() bool {
return false
}
// Returns the list of autoApproving namespaces, groups or tags for a given IPPrefix.
func (autoApprovers *AutoApprovers) GetRouteApprovers(
prefix netip.Prefix,
) ([]string, error) {
if prefix.Bits() == 0 {
return autoApprovers.ExitNode, nil // 0.0.0.0/0, ::/0 or equivalent
}
approverAliases := []string{}
for autoApprovedPrefix, autoApproverAliases := range autoApprovers.Routes {
autoApprovedPrefix, err := netip.ParsePrefix(autoApprovedPrefix)
if err != nil {
return nil, err
}
if prefix.Bits() >= autoApprovedPrefix.Bits() &&
autoApprovedPrefix.Contains(prefix.Masked().Addr()) {
approverAliases = append(approverAliases, autoApproverAliases...)
}
}
return approverAliases, nil
}
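
The GetRouteApprovers method shown in this hunk matches an advertised route against autoApprovers entries by comparing prefix lengths and testing containment of the route's masked address. A standalone sketch of that check using net/netip, with made-up prefixes:

    package main

    import (
        "fmt"
        "net/netip"
    )

    // covers reports whether an autoApprovers prefix is broad enough to include the
    // advertised route, following the comparison used in GetRouteApprovers above.
    func covers(autoApproved, advertised netip.Prefix) bool {
        return advertised.Bits() >= autoApproved.Bits() &&
            autoApproved.Contains(advertised.Masked().Addr())
    }

    func main() {
        entry := netip.MustParsePrefix("10.0.0.0/8")  // autoApprovers route key
        route := netip.MustParsePrefix("10.1.2.0/24") // advertised subnet route
        fmt.Println(covers(entry, route))             // true: the /8 contains 10.1.2.0/24
    }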

api.go

@@ -9,7 +9,6 @@ import (
"github.com/gorilla/mux"
"github.com/rs/zerolog/log"
"tailscale.com/types/key"
)
const (
@@ -53,7 +52,7 @@ func (h *Headscale) HealthHandler(
}
}
if err := h.pingDB(req.Context()); err != nil {
if err := h.pingDB(); err != nil {
respond(err)
return
@@ -94,34 +93,7 @@ func (h *Headscale) RegisterWebAPI(
) {
vars := mux.Vars(req)
nodeKeyStr, ok := vars["nkey"]
if !NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
// We need to make sure we dont open for XSS style injections, if the parameter that
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error.
var nodeKey key.NodePublic
err := nodeKey.UnmarshalText(
[]byte(NodePublicKeyEnsurePrefix(nodeKeyStr)),
)
if !ok || nodeKeyStr == "" || err != nil {
log.Warn().Err(err).Msg("Failed to parse incoming nodekey")
if !ok || nodeKeyStr == "" {
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write([]byte("Wrong params"))
@@ -158,7 +130,7 @@ func (h *Headscale) RegisterWebAPI(
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
writer.WriteHeader(http.StatusOK)
_, err = writer.Write(content.Bytes())
_, err := writer.Write(content.Bytes())
if err != nil {
log.Error().
Caller().
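
The RegisterWebAPI hunk above rejects registration URLs whose node key fails a NodePublicKeyRegex match and does not parse as a key.NodePublic, so arbitrary strings never reach the template. A rough sketch of that kind of pre-validation follows; the regular expression is an assumption for illustration only, since the actual pattern is not part of this diff.

    package main

    import (
        "fmt"
        "regexp"
    )

    // Assumed shape of a node public key, for demonstration only: an optional
    // "nodekey:" prefix followed by 64 hex characters (a 32-byte key).
    var nodeKeyPattern = regexp.MustCompile(`^(nodekey:)?[0-9a-fA-F]{64}$`)

    func main() {
        candidates := []string{
            "<script>alert(1)</script>", // rejected: not a plausible node key
            "nodekey:deadbeef",          // rejected: far too short
        }
        for _, c := range candidates {
            fmt.Printf("%-30q valid: %v\n", c, nodeKeyPattern.MatchString(c))
        }
    }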


@@ -13,7 +13,7 @@ func (h *Headscale) generateMapResponse(
Str("func", "generateMapResponse").
Str("machine", mapRequest.Hostinfo.Hostname).
Msg("Creating Map response")
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig)
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
if err != nil {
log.Error().
Caller().
@@ -35,9 +35,9 @@ func (h *Headscale) generateMapResponse(
return nil, err
}
profiles := h.getMapResponseUserProfiles(*machine, peers)
profiles := getMapResponseUserProfiles(*machine, peers)
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig)
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
if err != nil {
log.Error().
Caller().
@@ -62,7 +62,6 @@ func (h *Headscale) generateMapResponse(
DNSConfig: dnsConfig,
Domain: h.cfg.BaseDomain,
PacketFilter: h.aclRules,
SSHPolicy: h.sshPolicy,
DERPMap: h.DERPMap,
UserProfiles: profiles,
Debug: &tailcfg.Debug{

app.go

@@ -11,7 +11,6 @@ import (
"os"
"os/signal"
"sort"
"strconv"
"strings"
"sync"
"syscall"
@@ -19,13 +18,13 @@ import (
"github.com/coreos/go-oidc/v3/oidc"
"github.com/gorilla/mux"
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/patrickmn/go-cache"
zerolog "github.com/philip-bui/grpc-zerolog"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/puzpuzpuz/xsync/v2"
"github.com/puzpuzpuz/xsync"
zl "github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"golang.org/x/crypto/acme"
@@ -52,6 +51,10 @@ const (
errUnsupportedLetsEncryptChallengeType = Error(
"unknown value for Lets Encrypt challenge type",
)
ErrFailedPrivateKey = Error("failed to read or create private key")
ErrFailedNoisePrivateKey = Error("failed to read or create Noise protocol private key")
ErrSamePrivateKeys = Error("private key and noise private key are the same")
)
const (
@@ -88,9 +91,8 @@ type Headscale struct {
aclPolicy *ACLPolicy
aclRules []tailcfg.FilterRule
sshPolicy *tailcfg.SSHPolicy
lastStateChange *xsync.MapOf[string, time.Time]
lastStateChange *xsync.MapOf[time.Time]
oidcProvider *oidc.Provider
oauth2Config *oauth2.Config
@@ -103,20 +105,41 @@ type Headscale struct {
pollNetMapStreamWG sync.WaitGroup
}
// Look up the TLS constant relative to user-supplied TLS client
// authentication mode. If an unknown mode is supplied, the default
// value, tls.RequireAnyClientCert, is returned. The returned boolean
// indicates if the supplied mode was valid.
func LookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
switch mode {
case DisabledClientAuth:
// Client cert is _not_ required.
return tls.NoClientCert, true
case RelaxedClientAuth:
// Client cert required, but _not verified_.
return tls.RequireAnyClientCert, true
case EnforcedClientAuth:
// Client cert is _required and verified_.
return tls.RequireAndVerifyClientCert, true
default:
// Return the default when an unknown value is supplied.
return tls.RequireAnyClientCert, false
}
}
func NewHeadscale(cfg *Config) (*Headscale, error) {
privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create private key: %w", err)
return nil, ErrFailedPrivateKey
}
// TS2021 requires to have a different key from the legacy protocol.
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
return nil, ErrFailedNoisePrivateKey
}
if privateKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
return nil, ErrSamePrivateKeys
}
var dbString string
@@ -129,12 +152,8 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
cfg.DBuser,
)
if sslEnabled, err := strconv.ParseBool(cfg.DBssl); err == nil {
if !sslEnabled {
dbString += " sslmode=disable"
}
} else {
dbString += fmt.Sprintf(" sslmode=%s", cfg.DBssl)
if !cfg.DBssl {
dbString += " sslmode=disable"
}
if cfg.DBport != 0 {
@@ -174,11 +193,7 @@ func NewHeadscale(cfg *Config) (*Headscale, error) {
if cfg.OIDC.Issuer != "" {
err = app.initOIDC()
if err != nil {
if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
return nil, err
} else {
log.Warn().Err(err).Msg("failed to set up OIDC provider, falling back to CLI based authentication")
}
return nil, err
}
}
@@ -433,19 +448,16 @@ func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *mux.Router {
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
h.addLegacyHandlers(router)
router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).Methods(http.MethodPost)
router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).Methods(http.MethodGet)
router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig).
Methods(http.MethodGet)
router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig).Methods(http.MethodGet)
router.HandleFunc("/swagger", SwaggerUI).Methods(http.MethodGet)
router.HandleFunc("/swagger/v1/openapiv2.json", SwaggerAPIv1).
Methods(http.MethodGet)
router.HandleFunc("/swagger/v1/openapiv2.json", SwaggerAPIv1).Methods(http.MethodGet)
if h.cfg.DERP.ServerEnabled {
router.HandleFunc("/derp", h.DERPHandler)
@@ -465,8 +477,7 @@ func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *mux.Router {
func (h *Headscale) createNoiseMux() *mux.Router {
router := mux.NewRouter()
router.HandleFunc("/machine/register", h.NoiseRegistrationHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/register", h.NoiseRegistrationHandler).Methods(http.MethodPost)
router.HandleFunc("/machine/map", h.NoisePollNetMapHandler)
return router
@@ -590,7 +601,7 @@ func (h *Headscale) Serve() error {
grpcOptions := []grpc.ServerOption{
grpc.UnaryInterceptor(
grpcMiddleware.ChainUnaryServer(
grpc_middleware.ChainUnaryServer(
h.grpcAuthenticationInterceptor,
zerolog.NewUnaryServerInterceptor(),
),
@@ -809,18 +820,10 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
// Configuration via autocert with HTTP-01. This requires listening on
// port 80 for the certificate validation in addition to the headscale
// service, which can be configured to run on any other port.
server := &http.Server{
Addr: h.cfg.TLS.LetsEncrypt.Listen,
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
ReadTimeout: HTTPReadTimeout,
}
go func() {
err := server.ListenAndServe()
log.Fatal().
Caller().
Err(err).
Err(http.ListenAndServe(h.cfg.TLS.LetsEncrypt.Listen, certManager.HTTPHandler(http.HandlerFunc(h.redirect)))).
Msg("failed to set up a HTTP server")
}()
@@ -840,7 +843,12 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
}
log.Info().Msg(fmt.Sprintf(
"Client authentication (mTLS) is \"%s\". See the docs to learn about configuring this setting.",
h.cfg.TLS.ClientAuthMode))
tlsConfig := &tls.Config{
ClientAuth: h.cfg.TLS.ClientAuthMode,
NextProtos: []string{"http/1.1"},
Certificates: make([]tls.Certificate, 1),
MinVersion: tls.VersionTLS12,
@@ -852,36 +860,38 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
}
}
func (h *Headscale) setLastStateChangeToNow() {
func (h *Headscale) setLastStateChangeToNow(namespaces ...string) {
var err error
now := time.Now().UTC()
namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().
Caller().
Err(err).
Msg("failed to fetch all namespaces, failing to update last changed state.")
if len(namespaces) == 0 {
namespaces, err = h.ListNamespacesStr()
if err != nil {
log.Error().
Caller().
Err(err).
Msg("failed to fetch all namespaces, failing to update last changed state.")
}
}
for _, namespace := range namespaces {
lastStateUpdate.WithLabelValues(namespace.Name, "headscale").Set(float64(now.Unix()))
lastStateUpdate.WithLabelValues(namespace, "headscale").Set(float64(now.Unix()))
if h.lastStateChange == nil {
h.lastStateChange = xsync.NewMapOf[time.Time]()
}
h.lastStateChange.Store(namespace.Name, now)
h.lastStateChange.Store(namespace, now)
}
}
func (h *Headscale) getLastStateChange(namespaces ...Namespace) time.Time {
func (h *Headscale) getLastStateChange(namespaces ...string) time.Time {
times := []time.Time{}
// getLastStateChange takes a list of namespaces as a "filter"; if no namespaces
// are passed, then use the entire list of namespaces and look for the last update
if len(namespaces) > 0 {
for _, namespace := range namespaces {
if lastChange, ok := h.lastStateChange.Load(namespace.Name); ok {
if lastChange, ok := h.lastStateChange.Load(namespace); ok {
times = append(times, lastChange)
}
}

View File

@@ -59,3 +59,20 @@ func (s *Suite) ResetDB(c *check.C) {
}
app.db = db
}
// Ensure an error is returned when an invalid auth mode
// is supplied.
func (s *Suite) TestInvalidClientAuthMode(c *check.C) {
_, isValid := LookupTLSClientAuthMode("invalid")
c.Assert(isValid, check.Equals, false)
}
// Ensure that all client auth modes return a nil error.
func (s *Suite) TestAuthModes(c *check.C) {
modes := []string{"disabled", "relaxed", "enforced"}
for _, v := range modes {
_, isValid := LookupTLSClientAuthMode(v)
c.Assert(isValid, check.Equals, true)
}
}
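To make the behaviour exercised by these tests concrete, here is a minimal, self-contained sketch (not part of the diff) of how the looked-up mode ends up in a `tls.Config`, mirroring the `ClientAuth` field set in `getTLSSettings`; the mode strings are assumed to be "disabled", "relaxed" and "enforced":
```go
package main

import (
	"crypto/tls"
	"fmt"
)

// lookupTLSClientAuthMode is a stand-in for the LookupTLSClientAuthMode
// function shown in the app.go diff above.
func lookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
	switch mode {
	case "disabled":
		return tls.NoClientCert, true // client cert not required
	case "relaxed":
		return tls.RequireAnyClientCert, true // required, but not verified
	case "enforced":
		return tls.RequireAndVerifyClientCert, true // required and verified
	default:
		return tls.RequireAnyClientCert, false // default for unknown values
	}
}

func main() {
	mode, ok := lookupTLSClientAuthMode("enforced")
	if !ok {
		panic("invalid tls_client_auth_mode")
	}

	// The resolved mode is what the server's TLS settings use.
	cfg := &tls.Config{
		ClientAuth: mode,
		MinVersion: tls.VersionTLS12,
	}
	fmt.Println(cfg.ClientAuth == tls.RequireAndVerifyClientCert) // true
}
```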

View File

@@ -3,7 +3,6 @@ package cli
import (
"fmt"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -11,7 +10,8 @@ import (
)
const (
errPreAuthKeyMalformed = Error("key is malformed. expected 64 hex characters with `nodekey` prefix")
keyLength = 64
errPreAuthKeyTooShort = Error("key too short, must be 64 hexadecimal characters")
)
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
@@ -87,8 +87,8 @@ var createNodeCmd = &cobra.Command{
return
}
if !headscale.NodePublicKeyRegex.Match([]byte(machineKey)) {
err = errPreAuthKeyMalformed
if len(machineKey) != keyLength {
err = errPreAuthKeyTooShort
ErrorOutput(
err,
fmt.Sprintf("Error: %s", err),

View File

@@ -1,104 +0,0 @@
package cli
import (
"fmt"
"net"
"os"
"strconv"
"time"
"github.com/oauth2-proxy/mockoidc"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
accessTTL = 10 * time.Minute
refreshTTL = 60 * time.Minute
)
func init() {
rootCmd.AddCommand(mockOidcCmd)
}
var mockOidcCmd = &cobra.Command{
Use: "mockoidc",
Short: "Runs a mock OIDC server for testing",
Long: "This internal command runs a OpenID Connect for testing purposes",
Run: func(cmd *cobra.Command, args []string) {
err := mockOIDC()
if err != nil {
log.Error().Err(err).Msgf("Error running mock OIDC server")
os.Exit(1)
}
},
}
func mockOIDC() error {
clientID := os.Getenv("MOCKOIDC_CLIENT_ID")
if clientID == "" {
return errMockOidcClientIDNotDefined
}
clientSecret := os.Getenv("MOCKOIDC_CLIENT_SECRET")
if clientSecret == "" {
return errMockOidcClientSecretNotDefined
}
addrStr := os.Getenv("MOCKOIDC_ADDR")
if addrStr == "" {
return errMockOidcPortNotDefined
}
portStr := os.Getenv("MOCKOIDC_PORT")
if portStr == "" {
return errMockOidcPortNotDefined
}
port, err := strconv.Atoi(portStr)
if err != nil {
return err
}
mock, err := getMockOIDC(clientID, clientSecret)
if err != nil {
return err
}
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addrStr, port))
if err != nil {
return err
}
err = mock.Start(listener, nil)
if err != nil {
return err
}
log.Info().Msgf("Mock OIDC server listening on %s", listener.Addr().String())
log.Info().Msgf("Issuer: %s", mock.Issuer())
c := make(chan struct{})
<-c
return nil
}
func getMockOIDC(clientID string, clientSecret string) (*mockoidc.MockOIDC, error) {
keypair, err := mockoidc.NewKeypair(nil)
if err != nil {
return nil, err
}
mock := mockoidc.MockOIDC{
ClientID: clientID,
ClientSecret: clientSecret,
AccessTTL: accessTTL,
RefreshTTL: refreshTTL,
CodeChallengeMethodsSupported: []string{"plain", "S256"},
Keypair: keypair,
SessionStore: mockoidc.NewSessionStore(),
UserQueue: &mockoidc.UserQueue{},
ErrorQueue: &mockoidc.ErrorQueue{},
}
return &mock, nil
}

View File

@@ -3,7 +3,6 @@ package cli
import (
"fmt"
"log"
"net/netip"
"strconv"
"strings"
"time"
@@ -14,6 +13,7 @@ import (
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"inet.af/netaddr"
"tailscale.com/types/key"
)
@@ -134,9 +134,7 @@ var registerNodeCmd = &cobra.Command{
return
}
SuccessOutput(
response.Machine,
fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
SuccessOutput(response.Machine, "Machine register", output)
},
}
@@ -559,7 +557,7 @@ func nodesToPtables(
var IPV4Address string
var IPV6Address string
for _, addr := range machine.IpAddresses {
if netip.MustParseAddr(addr).Is4() {
if netaddr.MustParseIP(addr).Is4() {
IPV4Address = addr
} else {
IPV6Address = addr

View File

@@ -3,7 +3,6 @@ package cli
import (
"fmt"
"strconv"
"strings"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
@@ -34,8 +33,6 @@ func init() {
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
createPreAuthKeyCmd.Flags().
StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
createPreAuthKeyCmd.Flags().
StringSlice("tags", []string{}, "Tags to automatically assign to node")
}
var preauthkeysCmd = &cobra.Command{
@@ -84,16 +81,7 @@ var listPreAuthKeys = &cobra.Command{
}
tableData := pterm.TableData{
{
"ID",
"Key",
"Reusable",
"Ephemeral",
"Used",
"Expiration",
"Created",
"Tags",
},
{"ID", "Key", "Reusable", "Ephemeral", "Used", "Expiration", "Created"},
}
for _, key := range response.PreAuthKeys {
expiration := "-"
@@ -108,14 +96,6 @@ var listPreAuthKeys = &cobra.Command{
reusable = fmt.Sprintf("%v", key.GetReusable())
}
aclTags := ""
for _, tag := range key.AclTags {
aclTags += "," + tag
}
aclTags = strings.TrimLeft(aclTags, ",")
tableData = append(tableData, []string{
key.GetId(),
key.GetKey(),
@@ -124,7 +104,6 @@ var listPreAuthKeys = &cobra.Command{
strconv.FormatBool(key.GetUsed()),
expiration,
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
aclTags,
})
}
@@ -157,7 +136,6 @@ var createPreAuthKeyCmd = &cobra.Command{
reusable, _ := cmd.Flags().GetBool("reusable")
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
tags, _ := cmd.Flags().GetStringSlice("tags")
log.Trace().
Bool("reusable", reusable).
@@ -169,7 +147,6 @@ var createPreAuthKeyCmd = &cobra.Command{
Namespace: namespace,
Reusable: reusable,
Ephemeral: ephemeral,
AclTags: tags,
}
durationStr, _ := cmd.Flags().GetString("expiration")

View File

@@ -15,10 +15,6 @@ import (
var cfgFile string = ""
func init() {
if len(os.Args) > 1 && (os.Args[1] == "version" || os.Args[1] == "mockoidc" || os.Args[1] == "completion") {
return
}
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
@@ -51,7 +47,7 @@ func initConfig() {
machineOutput := HasMachineOutputFlag()
zerolog.SetGlobalLevel(cfg.Log.Level)
zerolog.SetGlobalLevel(cfg.LogLevel)
// If the user has requested a "machine" readable format,
// then disable login so the output remains valid.
@@ -59,10 +55,6 @@ func initConfig() {
zerolog.SetGlobalLevel(zerolog.Disabled)
}
if cfg.Log.Format == headscale.JSONLogFormat {
log.Logger = log.Output(os.Stdout)
}
if !cfg.DisableUpdateCheck && !machineOutput {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
Version != "dev" {

View File

@@ -19,7 +19,6 @@ import (
const (
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
SocketWritePermissions = 0o666
)
func getHeadscaleApp() (*headscale.Headscale, error) {
@@ -82,19 +81,6 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
address = cfg.UnixSocket
// Try to give the user better feedback if we cannot write to the headscale
// socket.
socket, err := os.OpenFile(cfg.UnixSocket, os.O_WRONLY, SocketWritePermissions) //nolint
if err != nil {
if os.IsPermission(err) {
log.Fatal().
Err(err).
Str("socket", cfg.UnixSocket).
Msgf("Unable to read/write to headscale socket, do you have the correct permissions?")
}
}
socket.Close()
grpcOptions = append(
grpcOptions,
grpc.WithTransportCredentials(insecure.NewCredentials()),

View File

@@ -55,10 +55,10 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
@@ -98,10 +98,10 @@ func (*Suite) TestConfigLoading(c *check.C) {
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")

View File

@@ -14,9 +14,7 @@ server_url: http://127.0.0.1:8080
# Address to listen to / bind to on the server
#
# For production:
# listen_addr: 0.0.0.0:8080
listen_addr: 127.0.0.1:8080
listen_addr: 0.0.0.0:8080
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
@@ -29,10 +27,7 @@ metrics_listen_addr: 127.0.0.1:9090
# remotely with the CLI
# Note: Remote access _only_ works if you have
# valid certificates.
#
# For production:
# grpc_listen_addr: 0.0.0.0:50443
grpc_listen_addr: 127.0.0.1:50443
grpc_listen_addr: 0.0.0.0:50443
# Allow the gRPC admin interface to run in INSECURE
# mode. This is not recommended as the traffic will
@@ -40,25 +35,20 @@ grpc_listen_addr: 127.0.0.1:50443
# are doing.
grpc_allow_insecure: false
# Private key used to encrypt the traffic between headscale
# Private key used encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
# For production:
# /var/lib/headscale/private.key
private_key_path: ./private.key
# The private key file which will be
# autogenerated if it's missing
private_key_path: /var/lib/headscale/private.key
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
# TS2021 Noise procotol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol. It must be different
# from the legacy private key.
#
# For production:
# private_key_path: /var/lib/headscale/noise_private.key
private_key_path: ./noise_private.key
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
@@ -88,7 +78,7 @@ derp:
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# Listens in UDP at the configured address for STUN connections to help on NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
@@ -122,18 +112,15 @@ disable_check_updates: false
# Time before an inactive ephemeral node is deleted?
ephemeral_node_inactivity_timeout: 30m
# Period to check for node updates within the tailnet. A value too low will severely affect
# Period to check for node updates in the tailnet. A value too low will severily affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keep alive messages frequently enough.
# to the nodes, as they won't get updates or keep alive messages in time.
# In case of doubts, do not touch the default 10s.
node_update_check_interval: 10s
# SQLite config
db_type: sqlite3
# For production:
# db_path: /var/lib/headscale/db.sqlite
db_path: ./db.sqlite
db_path: /var/lib/headscale/db.sqlite
# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
@@ -143,9 +130,6 @@ db_path: ./db.sqlite
# db_name: headscale
# db_user: foo
# db_pass: bar
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# db_ssl: false
### TLS configuration
@@ -164,18 +148,23 @@ acme_email: ""
# Domain name to request a TLS certificate for:
tls_letsencrypt_hostname: ""
# Client (Tailscale/Browser) authentication mode (mTLS)
# Acceptable values:
# - disabled: client authentication disabled
# - relaxed: client certificate is required but not verified
# - enforced: client certificate is required and verified
tls_client_auth_mode: relaxed
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
tls_letsencrypt_cache_dir: ./cache
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
# See [docs/tls.md](docs/tls.md) for more information
tls_letsencrypt_challenge_type: HTTP-01
# When HTTP-01 challenge is chosen, letsencrypt must set up a
# verification endpoint, and it will be listening on:
# verification endpoint, and it will be listning on:
# :http = port 80
tls_letsencrypt_listen: ":http"
@@ -183,10 +172,7 @@ tls_letsencrypt_listen: ":http"
tls_cert_path: ""
tls_key_path: ""
log:
# Output formatting for logs: text or json
format: text
level: info
log_level: info
# Path to a file containg ACL policies.
# ACLs can be defined as YAML or HUJSON.
@@ -203,25 +189,10 @@ acl_policy_path: ""
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
#
dns_config:
# Whether to prefer using Headscale provided DNS or use local.
override_local_dns: true
# List of DNS servers to expose to clients.
nameservers:
- 1.1.1.1
# NextDNS (see https://tailscale.com/kb/1218/nextdns/).
# "abc123" is example NextDNS ID, replace with yours.
#
# With metadata sharing:
# nameservers:
# - https://dns.nextdns.io/abc123
#
# Without metadata sharing:
# nameservers:
# - 2a07:a8c0::ab:c123
# - 2a07:a8c1::ab:c123
# Split DNS (see https://tailscale.com/kb/1054/dns/),
# list of search domains and the DNS to query for each one.
#
@@ -246,9 +217,9 @@ dns_config:
base_domain: example.com
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
# unix_socket: /var/run/headscale.sock
unix_socket: ./headscale.sock
# Note: for local development, you probably want to change this to:
# unix_socket: ./headscale.sock
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
@@ -256,7 +227,6 @@ unix_socket_permission: "0770"
# help us test it.
# OpenID Connect
# oidc:
# only_start_if_oidc_is_available: true
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"

189
config.go
View File

@@ -1,14 +1,16 @@
package headscale
import (
"crypto/tls"
"errors"
"fmt"
"io/fs"
"net/netip"
"net/url"
"strings"
"time"
"net/netip"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
@@ -21,9 +23,6 @@ import (
const (
tlsALPN01ChallengeType = "TLS-ALPN-01"
http01ChallengeType = "HTTP-01"
JSONLogFormat = "json"
TextLogFormat = "text"
)
// Config contains the initial Headscale configuration.
@@ -39,7 +38,7 @@ type Config struct {
PrivateKeyPath string
NoisePrivateKeyPath string
BaseDomain string
Log LogConfig
LogLevel zerolog.Level
DisableUpdateCheck bool
DERP DERPConfig
@@ -51,7 +50,7 @@ type Config struct {
DBname string
DBuser string
DBpass string
DBssl string
DBssl bool
TLS TLSConfig
@@ -74,8 +73,9 @@ type Config struct {
}
type TLSConfig struct {
CertPath string
KeyPath string
CertPath string
KeyPath string
ClientAuthMode tls.ClientAuthType
LetsEncrypt LetsEncryptConfig
}
@@ -88,15 +88,14 @@ type LetsEncryptConfig struct {
}
type OIDCConfig struct {
OnlyStartIfOIDCIsAvailable bool
Issuer string
ClientID string
ClientSecret string
Scope []string
ExtraParams map[string]string
AllowedDomains []string
AllowedUsers []string
StripEmaildomain bool
Issuer string
ClientID string
ClientSecret string
Scope []string
ExtraParams map[string]string
AllowedDomains []string
AllowedUsers []string
StripEmaildomain bool
}
type DERPConfig struct {
@@ -126,11 +125,6 @@ type ACLConfig struct {
PolicyPath string
}
type LogConfig struct {
Format string
Level zerolog.Level
}
func LoadConfig(path string, isFile bool) error {
if isFile {
viper.SetConfigFile(path)
@@ -152,12 +146,11 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
viper.SetDefault("tls_letsencrypt_challenge_type", http01ChallengeType)
viper.SetDefault("tls_client_auth_mode", "relaxed")
viper.SetDefault("log.level", "info")
viper.SetDefault("log.format", TextLogFormat)
viper.SetDefault("log_level", "info")
viper.SetDefault("dns_config", nil)
viper.SetDefault("dns_config.override_local_dns", true)
viper.SetDefault("derp.server.enabled", false)
viper.SetDefault("derp.server.stun.enabled", true)
@@ -173,7 +166,6 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"})
viper.SetDefault("oidc.strip_email_domain", true)
viper.SetDefault("oidc.only_start_if_oidc_is_available", true)
viper.SetDefault("logtail.enabled", false)
viper.SetDefault("randomize_client_port", false)
@@ -182,10 +174,6 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("node_update_check_interval", "10s")
if IsCLIConfigured() {
return nil
}
if err := viper.ReadInConfig(); err != nil {
log.Warn().Err(err).Msg("Failed to read configuration from disk")
@@ -221,6 +209,19 @@ func LoadConfig(path string, isFile bool) error {
errorText += "Fatal config error: server_url must start with https:// or http://\n"
}
_, authModeValid := LookupTLSClientAuthMode(
viper.GetString("tls_client_auth_mode"),
)
if !authModeValid {
errorText += fmt.Sprintf(
"Invalid tls_client_auth_mode supplied: %s. Accepted values: %s, %s, %s.",
viper.GetString("tls_client_auth_mode"),
DisabledClientAuth,
RelaxedClientAuth,
EnforcedClientAuth)
}
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
// to avoid races
minInactivityTimeout, _ := time.ParseDuration("65s")
@@ -250,6 +251,10 @@ func LoadConfig(path string, isFile bool) error {
}
func GetTLSConfig() TLSConfig {
tlsClientAuthMode, _ := LookupTLSClientAuthMode(
viper.GetString("tls_client_auth_mode"),
)
return TLSConfig{
LetsEncrypt: LetsEncryptConfig{
Hostname: viper.GetString("tls_letsencrypt_hostname"),
@@ -265,6 +270,7 @@ func GetTLSConfig() TLSConfig {
KeyPath: AbsolutePathFromConfigPath(
viper.GetString("tls_key_path"),
),
ClientAuthMode: tlsClientAuthMode,
}
}
@@ -329,58 +335,17 @@ func GetACLConfig() ACLConfig {
}
}
func GetLogConfig() LogConfig {
logLevelStr := viper.GetString("log.level")
logLevel, err := zerolog.ParseLevel(logLevelStr)
if err != nil {
logLevel = zerolog.DebugLevel
}
logFormatOpt := viper.GetString("log.format")
var logFormat string
switch logFormatOpt {
case "json":
logFormat = JSONLogFormat
case "text":
logFormat = TextLogFormat
case "":
logFormat = TextLogFormat
default:
log.Error().
Str("func", "GetLogConfig").
Msgf("Could not parse log format: %s. Valid choices are 'json' or 'text'", logFormatOpt)
}
return LogConfig{
Format: logFormat,
Level: logLevel,
}
}
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
if viper.IsSet("dns_config") {
dnsConfig := &tailcfg.DNSConfig{}
overrideLocalDNS := viper.GetBool("dns_config.override_local_dns")
if viper.IsSet("dns_config.nameservers") {
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
nameservers := []netip.Addr{}
resolvers := []*dnstype.Resolver{}
nameservers := make([]netip.Addr, len(nameserversStr))
resolvers := make([]*dnstype.Resolver, len(nameserversStr))
for _, nameserverStr := range nameserversStr {
// Search for explicit DNS-over-HTTPS resolvers
if strings.HasPrefix(nameserverStr, "https://") {
resolvers = append(resolvers, &dnstype.Resolver{
Addr: nameserverStr,
})
// This nameserver can not be parsed as an IP address
continue
}
// Parse nameserver as a regular IP
for index, nameserverStr := range nameserversStr {
nameserver, err := netip.ParseAddr(nameserverStr)
if err != nil {
log.Error().
@@ -389,19 +354,14 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
Msgf("Could not parse nameserver IP: %s", nameserverStr)
}
nameservers = append(nameservers, nameserver)
resolvers = append(resolvers, &dnstype.Resolver{
nameservers[index] = nameserver
resolvers[index] = &dnstype.Resolver{
Addr: nameserver.String(),
})
}
}
dnsConfig.Nameservers = nameservers
if overrideLocalDNS {
dnsConfig.Resolvers = resolvers
} else {
dnsConfig.FallbackResolvers = resolvers
}
dnsConfig.Resolvers = resolvers
}
if viper.IsSet("dns_config.restricted_nameservers") {
@@ -436,17 +396,17 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
if viper.IsSet("dns_config.domains") {
domains := viper.GetStringSlice("dns_config.domains")
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Domains = domains
} else if domains != nil {
log.Warn().
Msg("Warning: dns_config.domains is set, but no nameservers are configured. Ignoring domains.")
}
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
}
if viper.IsSet("dns_config.magic_dns") {
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
magicDNS := viper.GetBool("dns_config.magic_dns")
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Proxied = magicDNS
} else if magicDNS {
log.Warn().
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
}
}
var baseDomain string
@@ -463,17 +423,6 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
func GetHeadscaleConfig() (*Config, error) {
if IsCLIConfigured() {
return &Config{
CLI: CLIConfig{
Address: viper.GetString("cli.address"),
APIKey: viper.GetString("cli.api_key"),
Timeout: viper.GetDuration("cli.timeout"),
Insecure: viper.GetBool("cli.insecure"),
},
}, nil
}
dnsConfig, baseDomain := GetDNSConfig()
derpConfig := GetDERPConfig()
logConfig := GetLogTailConfig()
@@ -482,6 +431,28 @@ func GetHeadscaleConfig() (*Config, error) {
configuredPrefixes := viper.GetStringSlice("ip_prefixes")
parsedPrefixes := make([]netip.Prefix, 0, len(configuredPrefixes)+1)
logLevelStr := viper.GetString("log_level")
logLevel, err := zerolog.ParseLevel(logLevelStr)
if err != nil {
logLevel = zerolog.DebugLevel
}
legacyPrefixField := viper.GetString("ip_prefix")
if len(legacyPrefixField) > 0 {
log.
Warn().
Msgf(
"%s, %s",
"use of 'ip_prefix' for configuration is deprecated",
"please see 'ip_prefixes' in the shipped example.",
)
legacyPrefix, err := netip.ParsePrefix(legacyPrefixField)
if err != nil {
panic(fmt.Errorf("failed to parse ip_prefix: %w", err))
}
parsedPrefixes = append(parsedPrefixes, legacyPrefix)
}
for i, prefixInConfig := range configuredPrefixes {
prefix, err := netip.ParsePrefix(prefixInConfig)
if err != nil {
@@ -518,6 +489,7 @@ func GetHeadscaleConfig() (*Config, error) {
GRPCAddr: viper.GetString("grpc_listen_addr"),
GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"),
DisableUpdateCheck: viper.GetBool("disable_check_updates"),
LogLevel: logLevel,
IPPrefixes: prefixes,
PrivateKeyPath: AbsolutePathFromConfigPath(
@@ -545,7 +517,7 @@ func GetHeadscaleConfig() (*Config, error) {
DBname: viper.GetString("db_name"),
DBuser: viper.GetString("db_user"),
DBpass: viper.GetString("db_pass"),
DBssl: viper.GetString("db_ssl"),
DBssl: viper.GetBool("db_ssl"),
TLS: GetTLSConfig(),
@@ -558,9 +530,6 @@ func GetHeadscaleConfig() (*Config, error) {
UnixSocketPermission: GetFileMode("unix_socket_permission"),
OIDC: OIDCConfig{
OnlyStartIfOIDCIsAvailable: viper.GetBool(
"oidc.only_start_if_oidc_is_available",
),
Issuer: viper.GetString("oidc.issuer"),
ClientID: viper.GetString("oidc.client_id"),
ClientSecret: viper.GetString("oidc.client_secret"),
@@ -574,8 +543,6 @@ func GetHeadscaleConfig() (*Config, error) {
LogTail: logConfig,
RandomizeClientPort: randomizeClientPort,
ACL: GetACLConfig(),
CLI: CLIConfig{
Address: viper.GetString("cli.address"),
APIKey: viper.GetString("cli.api_key"),
@@ -583,10 +550,6 @@ func GetHeadscaleConfig() (*Config, error) {
Insecure: viper.GetBool("cli.insecure"),
},
Log: GetLogConfig(),
ACL: GetACLConfig(),
}, nil
}
func IsCLIConfigured() bool {
return viper.GetString("cli.address") != "" && viper.GetString("cli.api_key") != ""
}

9
db.go
View File

@@ -131,11 +131,6 @@ func (h *Headscale) initDB() error {
return err
}
err = db.AutoMigrate(&PreAuthKeyACLTag{})
if err != nil {
return err
}
_ = db.Migrator().DropTable("shared_machines")
err = db.AutoMigrate(&APIKey{})
@@ -226,8 +221,8 @@ func (h *Headscale) setValue(key string, value string) error {
return nil
}
func (h *Headscale) pingDB(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second)
func (h *Headscale) pingDB() error {
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
defer cancel()
db, err := h.db.DB()
if err != nil {

View File

@@ -34,7 +34,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
ctx, cancel := context.WithTimeout(context.Background(), HTTPReadTimeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)
req, err := http.NewRequestWithContext(ctx, "GET", addr.String(), nil)
if err != nil {
return nil, err
}

View File

@@ -99,13 +99,10 @@ func (h *Headscale) DERPHandler(
req *http.Request,
) {
log.Trace().Caller().Msgf("/derp request from %v", req.RemoteAddr)
upgrade := strings.ToLower(req.Header.Get("Upgrade"))
if upgrade != "websocket" && upgrade != "derp" {
if upgrade != "" {
log.Warn().
Caller().
Msg("No Upgrade header in DERP server request. If headscale is behind a reverse proxy, make sure it is configured to pass WebSockets through.")
up := strings.ToLower(req.Header.Get("Upgrade"))
if up != "websocket" && up != "derp" {
if up != "" {
log.Warn().Caller().Msgf("Weird websockets connection upgrade: %q", up)
}
writer.Header().Set("Content-Type", "text/plain")
writer.WriteHeader(http.StatusUpgradeRequired)
@@ -157,7 +154,7 @@ func (h *Headscale) DERPHandler(
if !fastStart {
pubKey := h.privateKey.Public()
pubKeyStr := pubKey.UntypedHexString() //nolint
pubKeyStr := pubKey.UntypedHexString() // nolint
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
"Upgrade: DERP\r\n"+
"Connection: Upgrade\r\n"+
@@ -177,7 +174,7 @@ func (h *Headscale) DERPProbeHandler(
req *http.Request,
) {
switch req.Method {
case http.MethodHead, http.MethodGet:
case "HEAD", "GET":
writer.Header().Set("Access-Control-Allow-Origin", "*")
writer.WriteHeader(http.StatusOK)
default:
@@ -205,7 +202,7 @@ func (h *Headscale) DERPBootstrapDNSHandler(
) {
dnsEntries := make(map[string][]net.IP)
resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
resolvCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
var resolver net.Resolver
for _, region := range h.DERPMap.Regions {

35
dns.go
View File

@@ -3,13 +3,11 @@ package headscale
import (
"fmt"
"net/netip"
"net/url"
"strings"
mapset "github.com/deckarep/golang-set/v2"
"go4.org/netipx"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/util/dnsname"
)
@@ -22,10 +20,6 @@ const (
ipv6AddressLength = 128
)
const (
nextDNSDoHPrefix = "https://dns.nextdns.io"
)
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS
// server (listening in 100.100.100.100 udp/53) should be used for.
@@ -158,39 +152,16 @@ func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
return fqdns
}
// If any NextDNS DoH resolvers are present in the list of resolvers, it will
// take metadata from the machine and instruct Tailscale to add it
// to the requests. This makes it possible to identify which device the
// requests come from in the NextDNS dashboard.
//
// This will produce a resolver like:
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine Machine) {
for _, resolver := range resolvers {
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
attrs := url.Values{
"device_name": []string{machine.Hostname},
"device_model": []string{machine.HostInfo.OS},
}
if len(machine.IPAddresses) > 0 {
attrs.Add("device_ip", machine.IPAddresses[0].String())
}
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
}
}
}
func getMapResponseDNSConfig(
dnsConfigOrig *tailcfg.DNSConfig,
baseDomain string,
machine Machine,
peers Machines,
) *tailcfg.DNSConfig {
var dnsConfig *tailcfg.DNSConfig = dnsConfigOrig.Clone()
var dnsConfig *tailcfg.DNSConfig
if dnsConfigOrig != nil && dnsConfigOrig.Proxied { // if MagicDNS is enabled
// Only inject the Search Domain of the current namespace - shared nodes should use their full FQDN
dnsConfig = dnsConfigOrig.Clone()
dnsConfig.Domains = append(
dnsConfig.Domains,
fmt.Sprintf(
@@ -213,7 +184,5 @@ func getMapResponseDNSConfig(
dnsConfig = dnsConfigOrig
}
addNextDNSMetadata(dnsConfig.Resolvers, machine)
return dnsConfig
}

View File

@@ -126,7 +126,6 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -135,7 +134,6 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -144,7 +142,6 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -153,7 +150,6 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -273,7 +269,6 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -282,7 +277,6 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -291,7 +285,6 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)
@@ -300,7 +293,6 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
false,
false,
nil,
nil,
)
c.Assert(err, check.IsNil)

View File

@@ -28,7 +28,6 @@ written by community members. It is _not_ verified by `headscale` developers.
- [Running headscale in a container](running-headscale-container.md)
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
- [Running headscale behind a reverse proxy](reverse-proxy.md)
## Misc

Binary file changes not shown (three binary files; the two with previous versions were 34 KiB and 49 KiB).

View File

@@ -1,48 +0,0 @@
# Better route management
As of today, route management in Headscale is very basic and does not allow for much flexibility, which prevents implementing subnet HA, 4via6, or more advanced features. We also have a number of bugs (e.g., routes exposed by ephemeral nodes).
This proposal aims to improve the route management.
## Current situation
Routes advertised by the nodes are read from the Hostinfo struct. If approved from the CLI or via autoApprovers, the route is added to the EnabledRoutes field in `Machine`.
This means that the advertised routes are not persisted in the database, as Hostinfo is always replaced. In the same way, EnabledRoutes can get out of sync with the actual routes in the node.
In case of colliding routes (i.e., subnets that are exposed from multiple nodes), we are currently just sending all of them in `PrimaryRoutes`... and hope for the best. (`PrimaryRoutes` is the field in `Node` used for subnet failover).
## Proposal
The core part is to create a new `Route` struct (and DB table), with the following fields:
```go
type Route struct {
ID uint64 `gorm:"primary_key"`
Machine *Machine
Prefix IPPrefix
Advertised bool
Enabled bool
IsPrimary bool
CreatedAt *time.Time
UpdatedAt *time.Time
DeletedAt *time.Time
}
```
- The `Advertised` field is set to true if the route is being advertised by the node. It is set to false if the route is removed. This way we can indicate if a later enabled route has stopped being advertised. A similar behaviour happens in the Tailscale.com control panel.
- The `Enabled` field is set to true if the route is enabled - via CLI or autoApprovers.
- `IsPrimary` indicates if Headscale has selected this route as the primary route for that particular subnet. This allows us to implement subnet failover (see the sketch at the end of this section). This would be fully automatic if there is more than one subnet router advertising the same network - which is the behaviour of Tailscale.com.
## Stuff to bear in mind
- We need to make sure to migrate the current `EnabledRoutes` of `Machine` into the new table.
- When a node stops sharing a subnet, I reckon we should mark it both as not `Advertised` and not `Enabled`. Users should re-enable it if the node advertises it again.
- If only one subnet router is advertising a subnet, we should mark it as primary.
- Regarding subnet failover, the current behaviour of Tailscale.com is to perform the failover after 15 seconds from the node disconnecting from their control panel. I reckon we cannot do the same currently. Our maximum granularity is the keep alive period.
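A minimal sketch of how primary-route election could work on top of the proposed table (illustrative only: field names are trimmed and the online check is assumed to come from keep-alive / last-seen state):
```go
package main

import "fmt"

// Route is a trimmed-down, illustrative version of the proposed record.
type Route struct {
	MachineID  uint64
	Prefix     string
	Advertised bool
	Enabled    bool
	IsPrimary  bool
	NodeOnline bool // assumed to be derived from keep-alive / last-seen state
}

// electPrimaries marks at most one advertised and enabled route per prefix
// as primary, preferring routes whose node is currently online.
func electPrimaries(routes []Route) {
	byPrefix := map[string][]*Route{}
	for i := range routes {
		route := &routes[i]
		route.IsPrimary = false
		if route.Advertised && route.Enabled {
			byPrefix[route.Prefix] = append(byPrefix[route.Prefix], route)
		}
	}
	for _, candidates := range byPrefix {
		for _, route := range candidates {
			if route.NodeOnline {
				route.IsPrimary = true
				break
			}
		}
	}
}

func main() {
	routes := []Route{
		{MachineID: 1, Prefix: "10.0.0.0/24", Advertised: true, Enabled: true, NodeOnline: false},
		{MachineID: 2, Prefix: "10.0.0.0/24", Advertised: true, Enabled: true, NodeOnline: true},
	}
	electPrimaries(routes)
	fmt.Println(routes[1].IsPrimary) // true: failover to the online subnet router
}
```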

View File

@@ -1,100 +0,0 @@
# Running headscale behind a reverse proxy
Running headscale behind a reverse proxy is useful when running multiple applications on the same server, and you want to reuse the same external IP and port - usually tcp/443 for HTTPS.
### WebSockets
The reverse proxy MUST be configured to support WebSockets, as WebSocket support is needed for clients running Tailscale v1.30+.
WebSockets support is required when using the headscale embedded DERP server. In this case, you will also need to expose the UDP port used for STUN (by default, udp/3478). Please check our [config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml).
### TLS
Headscale can be configured not to use TLS, leaving it to the reverse proxy to handle. Add the following configuration values to your headscale config file.
```yaml
server_url: https://<YOUR_SERVER_NAME> # This should be the FQDN at which headscale will be served
listen_addr: 0.0.0.0:8080
metrics_listen_addr: 0.0.0.0:9090
tls_cert_path: ""
tls_key_path: ""
```
## nginx
The following example configuration can be used in your nginx setup, substituting values as necessary. `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `http://localhost:8080`.
```Nginx
map $http_upgrade $connection_upgrade {
default keep-alive;
'websocket' upgrade;
'' close;
}
server {
listen 80;
listen [::]:80;
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name <YOUR_SERVER_NAME>;
ssl_certificate <PATH_TO_CERT>;
ssl_certificate_key <PATH_CERT_KEY>;
ssl_protocols TLSv1.2 TLSv1.3;
location / {
proxy_pass http://<IP:PORT>;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $server_name;
proxy_redirect http:// https://;
proxy_buffering off;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
}
}
```
## istio/envoy
If you are using [Istio](https://istio.io/) ingressgateway or [Envoy](https://www.envoyproxy.io/) as a reverse proxy, there are some tips for you. If not configured, you may see debug logs in the proxy like the one below:
```log
Sending local reply with details upgrade_failed
```
### Envoy
You need to add a new upgrade_type named `tailscale-control-protocol`. [See details](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto#extensions-filters-network-http-connection-manager-v3-httpconnectionmanager-upgradeconfig)
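As a sketch only (not from the original docs; the field names mirror the Istio patch below and the linked Envoy documentation), the relevant `http_connection_manager` fragment might look like:
```yaml
typed_config:
  "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
  upgrade_configs:
    - upgrade_type: tailscale-control-protocol
```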
### Istio
As with Envoy, we can use an `EnvoyFilter` to add the upgrade_type.
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
name: headscale-behind-istio-ingress
namespace: istio-system
spec:
configPatches:
- applyTo: NETWORK_FILTER
match:
listener:
filterChain:
filter:
name: envoy.filters.network.http_connection_manager
patch:
operation: MERGE
value:
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
upgrade_configs:
- upgrade_type: tailscale-control-protocol
```

View File

@@ -48,17 +48,15 @@ Modify the config file to your preferences before launching Docker container.
Here are some settings that you likely want:
```yaml
# Change to your hostname or host IP
server_url: http://your-host-name:8080
server_url: http://your-host-name:8080 # Change to your hostname or host IP
# Listen to 0.0.0.0 so it's accessible outside the container
metrics_listen_addr: 0.0.0.0:9090
# The default /var/lib/headscale path is not writable in the container
private_key_path: /etc/headscale/private.key
# The default /var/lib/headscale path is not writable in the container
noise:
private_key_path: /etc/headscale/noise_private.key
private_key_path: /var/lib/headscale/noise_private.key
# The default /var/lib/headscale path is not writable in the container
db_type: sqlite3
db_path: /etc/headscale/db.sqlite
```
@@ -68,6 +66,7 @@ db_path: /etc/headscale/db.sqlite
docker run \
--name headscale \
--detach \
--rm \
--volume $(pwd)/config:/etc/headscale/ \
--publish 127.0.0.1:8080:8080 \
--publish 127.0.0.1:9090:9090 \
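For reference, a minimal docker-compose sketch equivalent to the run command above might look like the following. This is illustrative only: the image name/tag and the `headscale serve` command are assumptions, and the same `./config` bind mount and published ports are reused.
```yaml
version: "3.8"
services:
  headscale:
    image: headscale/headscale:latest   # assumed image and tag
    container_name: headscale
    restart: unless-stopped
    command: headscale serve            # assumed command
    volumes:
      - ./config:/etc/headscale/        # same config mount as above
    ports:
      - "127.0.0.1:8080:8080"
      - "127.0.0.1:9090:9090"
```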

View File

@@ -29,3 +29,17 @@ headscale can also be configured to expose its web service via TLS. To configure
tls_cert_path: ""
tls_key_path: ""
```
### Configuring Mutual TLS Authentication (mTLS)
mTLS is a method by which an HTTPS server authenticates clients, e.g. Tailscale, using TLS certificates. This can be configured by applying one of the following values to the `tls_client_auth_mode` setting in the configuration file.
| Value | Behavior |
| ------------------- | ---------------------------------------------------------- |
| `disabled` | Disable mTLS. |
| `relaxed` (default) | A client certificate is required, but it is not verified. |
| `enforced` | Requires clients to supply a certificate that is verified. |
```yaml
tls_client_auth_mode: ""
```
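For example, to require and verify client certificates, the setting would be (a hypothetical snippet using one of the accepted values above):
```yaml
tls_client_auth_mode: enforced
```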

14
flake.lock generated
View File

@@ -2,11 +2,11 @@
"nodes": {
"flake-utils": {
"locked": {
"lastModified": 1659877975,
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
"lastModified": 1653893745,
"narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
"rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
"type": "github"
},
"original": {
@@ -17,16 +17,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1666869603,
"narHash": "sha256-3V53or4Vpu4+LrGfGSh3T2V8+qf5RP6nRuex9GywkwE=",
"lastModified": 1654847188,
"narHash": "sha256-MC+eP7XOGE1LAswOPqdcGoUqY9mEQ3ZaaxamVTbc0hM=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "2001e2b31c565bcdf7bc13062b8d7cfccaca05b8",
"rev": "8b66e3f2ebcc644b78cec9d6f152192f4e7d322f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"ref": "nixos-22.05",
"repo": "nixpkgs",
"type": "github"
}

301
flake.nix
View File

@@ -2,178 +2,167 @@
description = "headscale - Open Source Tailscale Control server";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.05";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = {
self,
nixpkgs,
flake-utils,
...
}: let
headscaleVersion =
if (self ? shortRev)
then self.shortRev
else "dev";
in
outputs = { self, nixpkgs, flake-utils, ... }:
let
headscaleVersion = if (self ? shortRev) then self.shortRev else "dev";
in
{
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
in rec {
headscale = pkgs.buildGo119Module rec {
pname = "headscale";
version = headscaleVersion;
src = pkgs.lib.cleanSource self;
overlay = final: prev:
let
pkgs = nixpkgs.legacyPackages.${prev.system};
in
rec {
headscale =
pkgs.buildGo118Module rec {
pname = "headscale";
version = headscaleVersion;
src = pkgs.lib.cleanSource self;
tags = ["ts2019"];
# When updating go.mod or go.sum, a new sha will need to be calculated,
# update this if you have a mismatch after doing a change to those files.
vendorSha256 = "sha256-paDdPsi5OfxsmgX7c5NSDSLYDipFqxxcxV3K4Tc77nQ=";
# Only run unit tests when testing a build
checkFlags = ["-short"];
ldflags = [ "-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}" ];
};
# When updating go.mod or go.sum, a new sha will need to be calculated,
# update this if you have a mismatch after doing a change to those files.
vendorSha256 = "sha256-Cq0WipTQ+kGcvnfP0kjyvjyonl2OC9W7Tj0MCuB1lDU=";
golines =
pkgs.buildGoModule rec {
pname = "golines";
version = "0.9.0";
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
};
src = pkgs.fetchFromGitHub {
owner = "segmentio";
repo = "golines";
rev = "v${version}";
sha256 = "sha256-BUXEg+4r9L/gqe4DhTlhN55P3jWt7ZyWFQycO6QePrw=";
};
golines = pkgs.buildGoModule rec {
pname = "golines";
version = "0.11.0";
vendorSha256 = "sha256-sEzWUeVk5GB0H41wrp12P8sBWRjg0FHUX6ABDEEBqK8=";
src = pkgs.fetchFromGitHub {
owner = "segmentio";
repo = "golines";
rev = "v${version}";
sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
nativeBuildInputs = [ pkgs.installShellFiles ];
};
golangci-lint = prev.golangci-lint.override {
# Override https://github.com/NixOS/nixpkgs/pull/166801 which changed this
# to buildGo118Module because it does not build on Darwin.
inherit (prev) buildGoModule;
};
vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
# golangci-lint =
# pkgs.buildGo117Module rec {
# pname = "golangci-lint";
# version = "1.46.2";
#
# src = pkgs.fetchFromGitHub {
# owner = "golangci";
# repo = "golangci-lint";
# rev = "v${version}";
# sha256 = "sha256-7sDAwWz+qoB/ngeH35tsJ5FZUfAQvQsU6kU9rUHIHMk=";
# };
#
# vendorSha256 = "sha256-w38OKN6HPoz37utG/2QSPMai55IRDXCIIymeMe6ogIU=";
#
# nativeBuildInputs = [ pkgs.installShellFiles ];
# };
nativeBuildInputs = [pkgs.installShellFiles];
protoc-gen-grpc-gateway =
pkgs.buildGoModule rec {
pname = "grpc-gateway";
version = "2.8.0";
src = pkgs.fetchFromGitHub {
owner = "grpc-ecosystem";
repo = "grpc-gateway";
rev = "v${version}";
sha256 = "sha256-8eBBBYJ+tBjB2fgPMX/ZlbN3eeS75e8TAZYOKXs6hcg=";
};
vendorSha256 = "sha256-AW2Gn/mlZyLMwF+NpK59eiOmQrYWW/9HPjbunYc9Ij4=";
nativeBuildInputs = [ pkgs.installShellFiles ];
subPackages = [ "protoc-gen-grpc-gateway" "protoc-gen-openapiv2" ];
};
};
golangci-lint = prev.golangci-lint.override {
# Override https://github.com/NixOS/nixpkgs/pull/166801 which changed this
# to buildGo118Module because it does not build on Darwin.
inherit (prev) buildGoModule;
};
# golangci-lint =
# pkgs.buildGo117Module rec {
# pname = "golangci-lint";
# version = "1.46.2";
#
# src = pkgs.fetchFromGitHub {
# owner = "golangci";
# repo = "golangci-lint";
# rev = "v${version}";
# sha256 = "sha256-7sDAwWz+qoB/ngeH35tsJ5FZUfAQvQsU6kU9rUHIHMk=";
# };
#
# vendorSha256 = "sha256-w38OKN6HPoz37utG/2QSPMai55IRDXCIIymeMe6ogIU=";
#
# nativeBuildInputs = [ pkgs.installShellFiles ];
# };
protoc-gen-grpc-gateway = pkgs.buildGoModule rec {
pname = "grpc-gateway";
version = "2.8.0";
src = pkgs.fetchFromGitHub {
owner = "grpc-ecosystem";
repo = "grpc-gateway";
rev = "v${version}";
sha256 = "sha256-8eBBBYJ+tBjB2fgPMX/ZlbN3eeS75e8TAZYOKXs6hcg=";
} // flake-utils.lib.eachDefaultSystem
(system:
let
pkgs = import nixpkgs {
overlays = [ self.overlay ];
inherit system;
};
vendorSha256 = "sha256-AW2Gn/mlZyLMwF+NpK59eiOmQrYWW/9HPjbunYc9Ij4=";
nativeBuildInputs = [pkgs.installShellFiles];
subPackages = ["protoc-gen-grpc-gateway" "protoc-gen-openapiv2"];
};
};
}
// flake-utils.lib.eachDefaultSystem
(system: let
pkgs = import nixpkgs {
overlays = [self.overlay];
inherit system;
};
buildDeps = with pkgs; [git go_1_19 gnumake];
devDeps = with pkgs;
buildDeps
++ [
golangci-lint
golines
nodePackages.prettier
# Protobuf dependencies
protobuf
protoc-gen-go
protoc-gen-go-grpc
protoc-gen-grpc-gateway
buf
clang-tools # clang-format
];
# Add entry to build a docker image with headscale
# caveat: only works on Linux
#
# Usage:
# nix build .#headscale-docker
# docker load < result
headscale-docker = pkgs.dockerTools.buildLayeredImage {
name = "headscale";
tag = headscaleVersion;
contents = [pkgs.headscale];
config.Entrypoint = [(pkgs.headscale + "/bin/headscale")];
};
in rec {
# `nix develop`
devShell = pkgs.mkShell {
buildInputs = devDeps;
shellHook = ''
export GOFLAGS=-tags="ts2019"
'';
};
# `nix build`
packages = with pkgs; {
inherit headscale;
inherit headscale-docker;
};
defaultPackage = pkgs.headscale;
# `nix run`
apps.headscale = flake-utils.lib.mkApp {
drv = packages.headscale;
};
defaultApp = apps.headscale;
checks = {
format =
pkgs.runCommand "check-format"
{
buildInputs = with pkgs; [
gnumake
nixpkgs-fmt
buildDeps = with pkgs; [ git go_1_18 gnumake ];
devDeps = with pkgs;
buildDeps ++ [
golangci-lint
nodePackages.prettier
golines
clang-tools
nodePackages.prettier
# Protobuf dependencies
protobuf
protoc-gen-go
protoc-gen-go-grpc
protoc-gen-grpc-gateway
buf
clang-tools # clang-format
];
} ''
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
'';
};
});
# Add entry to build a docker image with headscale
# caveat: only works on Linux
#
# Usage:
# nix build .#headscale-docker
# docker load < result
headscale-docker = pkgs.dockerTools.buildLayeredImage {
name = "headscale";
tag = headscaleVersion;
contents = [ pkgs.headscale ];
config.Entrypoint = [ (pkgs.headscale + "/bin/headscale") ];
};
in
rec {
# `nix develop`
devShell = pkgs.mkShell { buildInputs = devDeps; };
# `nix build`
packages = with pkgs; {
inherit headscale;
inherit headscale-docker;
};
defaultPackage = pkgs.headscale;
# `nix run`
apps.headscale = flake-utils.lib.mkApp {
drv = packages.headscale;
};
defaultApp = apps.headscale;
checks = {
format = pkgs.runCommand "check-format"
{
buildInputs = with pkgs; [
gnumake
nixpkgs-fmt
golangci-lint
nodePackages.prettier
golines
clang-tools
];
} ''
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
'';
};
});
}

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/apikey.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/device.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/headscale.proto

File diff suppressed because it is too large.

View File

@@ -1,8 +1,4 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc (unknown)
// source: headscale/v1/headscale.proto
package v1

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/machine.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/namespace.proto

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/preauthkey.proto
@@ -34,7 +34,6 @@ type PreAuthKey struct {
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
}
func (x *PreAuthKey) Reset() {
@@ -125,13 +124,6 @@ func (x *PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {
return nil
}
func (x *PreAuthKey) GetAclTags() []string {
if x != nil {
return x.AclTags
}
return nil
}
type CreatePreAuthKeyRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -141,7 +133,6 @@ type CreatePreAuthKeyRequest struct {
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
}
func (x *CreatePreAuthKeyRequest) Reset() {
@@ -204,13 +195,6 @@ func (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {
return nil
}
func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
if x != nil {
return x.AclTags
}
return nil
}
type CreatePreAuthKeyResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -452,7 +436,7 @@ var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91,
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a,
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
@@ -470,45 +454,42 @@ var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x09,
0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x67, 0x73, 0x22, 0xc8, 0x01,
0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d,
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61,
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61,
0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61,
0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c,
0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61,
0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a,
0x08, 0x61, 0x63, 0x6c, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52,
0x07, 0x61, 0x63, 0x6c, 0x54, 0x61, 0x67, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61,
0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
0x22, 0x49, 0x0a, 0x17, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
0x41, 0x74, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65,
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65,
0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68,
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41,
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a,
0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a,
0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, 0x17, 0x45, 0x78,
0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50,
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45,
0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50,
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22,
0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72,
0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65,
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f,
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73,
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65,
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75,
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.27.1
// protoc (unknown)
// source: headscale/v1/routes.proto

View File

@@ -824,12 +824,6 @@
"expiration": {
"type": "string",
"format": "date-time"
},
"aclTags": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
@@ -1108,12 +1102,6 @@
"createdAt": {
"type": "string",
"format": "date-time"
},
"aclTags": {
"type": "array",
"items": {
"type": "string"
}
}
}
},

147
go.mod
View File

@@ -3,148 +3,149 @@ module github.com/juanfont/headscale
go 1.19
require (
github.com/AlecAivazis/survey/v2 v2.3.6
github.com/AlecAivazis/survey/v2 v2.3.5
github.com/ccding/go-stun/stun v0.0.0-20200514191101-4dc67bcdb029
github.com/cenkalti/backoff/v4 v4.1.3
github.com/coreos/go-oidc/v3 v3.4.0
github.com/coreos/go-oidc/v3 v3.2.0
github.com/deckarep/golang-set/v2 v2.1.0
github.com/efekarakus/termcolor v1.0.1
github.com/glebarez/sqlite v1.5.0
github.com/gofrs/uuid v4.3.0+incompatible
github.com/glebarez/sqlite v1.4.6
github.com/gofrs/uuid v4.2.0+incompatible
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0
github.com/klauspost/compress v1.15.12
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.2
github.com/klauspost/compress v1.15.9
github.com/oauth2-proxy/mockoidc v0.0.0-20220308204021-b9169deeb282
github.com/ory/dockertest/v3 v3.9.1
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/philip-bui/grpc-zerolog v1.0.1
github.com/prometheus/client_golang v1.13.0
github.com/prometheus/common v0.37.0
github.com/pterm/pterm v0.12.49
github.com/puzpuzpuz/xsync/v2 v2.0.2
github.com/rs/zerolog v1.28.0
github.com/spf13/cobra v1.6.1
github.com/spf13/viper v1.13.0
github.com/pterm/pterm v0.12.45
github.com/puzpuzpuz/xsync v1.4.2
github.com/rs/zerolog v1.27.0
github.com/spf13/cobra v1.5.0
github.com/spf13/viper v1.12.0
github.com/stretchr/testify v1.8.0
github.com/tailscale/hujson v0.0.0-20220630195928-54599719472f
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20220925034521-797b0c90d8ab
golang.org/x/crypto v0.1.0
golang.org/x/net v0.1.0
golang.org/x/oauth2 v0.1.0
golang.org/x/sync v0.1.0
google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c
google.golang.org/grpc v1.50.1
go4.org/netipx v0.0.0-20220725152314-7e7bdc8411bf
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
google.golang.org/genproto v0.0.0-20220808204814-fd01256a5276
google.golang.org/grpc v1.48.0
google.golang.org/protobuf v1.28.1
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.4.5
gorm.io/gorm v1.24.1-0.20221019064659-5dd2bb482755
tailscale.com v1.32.2
gorm.io/driver/postgres v1.3.8
gorm.io/gorm v1.23.8
inet.af/netaddr v0.0.0-20220617031823-097006376321
tailscale.com v1.30.0
)
require (
atomicgo.dev/cursor v0.1.1 // indirect
atomicgo.dev/keyboard v0.2.8 // indirect
filippo.io/edwards25519 v1.0.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
filippo.io/edwards25519 v1.0.0-rc.1 // indirect
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/akutz/memconn v0.1.0 // indirect
github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/cli v20.10.21+incompatible // indirect
github.com/docker/docker v20.10.21+incompatible // indirect
github.com/docker/cli v20.10.16+incompatible // indirect
github.com/docker/docker v20.10.16+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/glebarez/go-sqlite v1.19.2 // indirect
github.com/glebarez/go-sqlite v1.17.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.2 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/gookit/color v1.5.0 // indirect
github.com/hashicorp/go-version v1.4.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hdevalence/ed25519consensus v0.1.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.13.0 // indirect
github.com/jackc/pgconn v1.12.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.1 // indirect
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.12.0 // indirect
github.com/jackc/pgx/v4 v4.17.2 // indirect
github.com/jackc/pgtype v1.11.0 // indirect
github.com/jackc/pgx/v4 v4.16.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jinzhu/now v1.1.4 // indirect
github.com/josharian/native v1.0.0 // indirect
github.com/jsimonetti/rtnetlink v1.2.3 // indirect
github.com/jsimonetti/rtnetlink v1.1.2-0.20220408201609-d380b505068b // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lithammer/fuzzysearch v1.1.5 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mdlayher/netlink v1.6.2 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mdlayher/netlink v1.6.0 // indirect
github.com/mdlayher/socket v0.2.3 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/opencontainers/runc v1.1.4 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
github.com/opencontainers/runc v1.1.2 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20220927061507-ef77025ab5aa // indirect
github.com/rivo/uniseg v0.4.2 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/rogpeppe/go-internal v1.8.1-0.20211023094830-115ce09fd6b4 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/afero v1.9.2 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/afero v1.8.2 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.4.1 // indirect
github.com/subosito/gotenv v1.3.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go4.org/mem v0.0.0-20220726221520-4f986261bf13 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/term v0.1.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/time v0.1.0 // indirect
golang.org/x/tools v0.2.0 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
go4.org/mem v0.0.0-20210711025021-927187094b94 // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/term v0.0.0-20220411215600-e5f449aeb171 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
golang.zx2c4.com/wireguard/windows v0.4.10 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
modernc.org/libc v1.21.4 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.4.0 // indirect
modernc.org/sqlite v1.19.3 // indirect
modernc.org/libc v1.16.8 // indirect
modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.1.1 // indirect
modernc.org/sqlite v1.17.3 // indirect
nhooyr.io/websocket v1.8.7 // indirect
)

944
go.sum

File diff suppressed because it is too large

View File

@@ -1,4 +1,4 @@
// nolint
//nolint
package headscale
import (
@@ -12,7 +12,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
type headscaleV1APIServer struct { // v1.HeadscaleServiceServer
@@ -107,21 +106,11 @@ func (api headscaleV1APIServer) CreatePreAuthKey(
expiration = request.GetExpiration().AsTime()
}
for _, tag := range request.AclTags {
err := validateTag(tag)
if err != nil {
return &v1.CreatePreAuthKeyResponse{
PreAuthKey: nil,
}, status.Error(codes.InvalidArgument, err.Error())
}
}
preAuthKey, err := api.h.CreatePreAuthKey(
request.GetNamespace(),
request.GetReusable(),
request.GetEphemeral(),
&expiration,
request.AclTags,
)
if err != nil {
return nil, err
@@ -480,7 +469,7 @@ func (api headscaleV1APIServer) DebugCreateMachine(
Hostname: "DebugTestMachine",
}
givenName, err := api.h.GenerateGivenName(request.GetKey(), request.GetName())
givenName, err := api.h.GenerateGivenName(request.GetName())
if err != nil {
return nil, err
}
@@ -497,14 +486,9 @@ func (api headscaleV1APIServer) DebugCreateMachine(
HostInfo: HostInfo(hostinfo),
}
nodeKey := key.NodePublic{}
err = nodeKey.UnmarshalText([]byte(request.GetKey()))
if err != nil {
log.Panic().Msg("can not add machine for debug. invalid node key")
}
api.h.registrationCache.Set(
NodePublicKeyStripPrefix(nodeKey),
request.GetKey(),
newMachine,
registerCacheExpiration,
)

View File

@@ -1,15 +0,0 @@
//go:build ts2019
package headscale
import (
"net/http"
"github.com/gorilla/mux"
)
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
router.HandleFunc("/machine/{mkey}/map", h.PollNetMapHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/{mkey}", h.RegistrationHandler).Methods(http.MethodPost)
}

View File

@@ -1,8 +0,0 @@
//go:build !ts2019
package headscale
import "github.com/gorilla/mux"
func (h *Headscale) addLegacyHandlers(router *mux.Router) {
}

View File

@@ -1,302 +0,0 @@
package integration
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"strconv"
"testing"
"github.com/juanfont/headscale"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/hsic"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
const (
dockerContextPath = "../."
hsicOIDCMockHashLength = 6
oidcServerPort = 10000
)
var errStatusCodeNotOK = errors.New("status code not OK")
type AuthOIDCScenario struct {
*Scenario
mockOIDC *dockertest.Resource
}
func TestOIDCAuthenticationPingAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
baseScenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
scenario := AuthOIDCScenario{
Scenario: baseScenario,
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
}
oidcConfig, err := scenario.runMockOIDC()
if err != nil {
t.Errorf("failed to run mock OIDC server: %s", err)
}
oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
"HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret,
"HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain),
}
err = scenario.CreateHeadscaleEnv(
spec,
hsic.WithTestName("oidcauthping"),
hsic.WithConfigEnv(oidcMap),
hsic.WithHostnameAsServerURL(),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func (s *AuthOIDCScenario) CreateHeadscaleEnv(
namespaces map[string]int,
opts ...hsic.Option,
) error {
headscale, err := s.Headscale(opts...)
if err != nil {
return err
}
err = headscale.WaitForReady()
if err != nil {
return err
}
for namespaceName, clientCount := range namespaces {
log.Printf("creating namespace %s with %d clients", namespaceName, clientCount)
err = s.CreateNamespace(namespaceName)
if err != nil {
return err
}
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
if err != nil {
return err
}
err = s.runTailscaleUp(namespaceName, headscale.GetEndpoint())
if err != nil {
return err
}
}
return nil
}
func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)
hostname := fmt.Sprintf("hs-oidcmock-%s", hash)
mockOidcOptions := &dockertest.RunOptions{
Name: hostname,
Cmd: []string{"headscale", "mockoidc"},
ExposedPorts: []string{"10000/tcp"},
PortBindings: map[docker.Port][]docker.PortBinding{
"10000/tcp": {{HostPort: "10000"}},
},
Networks: []*dockertest.Network{s.Scenario.network},
Env: []string{
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
"MOCKOIDC_PORT=10000",
"MOCKOIDC_CLIENT_ID=superclient",
"MOCKOIDC_CLIENT_SECRET=supersecret",
},
}
headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.debug",
ContextDir: dockerContextPath,
}
err := s.pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}
if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
headscaleBuildOptions,
mockOidcOptions,
dockertestutil.DockerRestartPolicy); err == nil {
s.mockOIDC = pmockoidc
} else {
return nil, err
}
log.Println("Waiting for headscale mock oidc to be ready for tests")
hostEndpoint := fmt.Sprintf(
"%s:%s",
s.mockOIDC.GetIPInNetwork(s.network),
s.mockOIDC.GetPort(fmt.Sprintf("%d/tcp", oidcServerPort)),
)
if err := s.pool.Retry(func() error {
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
httpClient := &http.Client{}
ctx := context.Background()
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, oidcConfigURL, nil)
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("headscale mock OIDC tests is not ready: %s\n", err)
return err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return errStatusCodeNotOK
}
return nil
}); err != nil {
return nil, err
}
log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)
return &headscale.OIDCConfig{
Issuer: fmt.Sprintf("http://%s/oidc",
net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(oidcServerPort))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
}, nil
}
func (s *AuthOIDCScenario) runTailscaleUp(
namespaceStr, loginServer string,
) error {
headscale, err := s.Headscale()
if err != nil {
return err
}
log.Printf("running tailscale up for namespace %s", namespaceStr)
if namespace, ok := s.namespaces[namespaceStr]; ok {
for _, client := range namespace.Clients {
namespace.joinWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.joinWaitGroup.Done()
// TODO(juanfont): error handle this
loginURL, err := c.UpWithLoginURL(loginServer)
if err != nil {
log.Printf("failed to run tailscale up: %s", err)
}
loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
loginURL.Scheme = "http"
insecureTransport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint
}
log.Printf("%s login url: %s\n", c.Hostname(), loginURL.String())
httpClient := &http.Client{Transport: insecureTransport}
ctx := context.Background()
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
resp, err := httpClient.Do(req)
if err != nil {
log.Printf("%s failed to get login url %s: %s", c.Hostname(), loginURL, err)
return
}
defer resp.Body.Close()
_, err = io.ReadAll(resp.Body)
if err != nil {
log.Printf("%s failed to read response body: %s", c.Hostname(), err)
return
}
log.Printf("Finished request for %s to join tailnet", c.Hostname())
}(client)
err = client.WaitForReady()
if err != nil {
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
}
log.Printf("client %s is ready", client.Hostname())
}
namespace.joinWaitGroup.Wait()
return nil
}
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
}
func (s *AuthOIDCScenario) Shutdown() error {
err := s.pool.Purge(s.mockOIDC)
if err != nil {
return err
}
return s.Scenario.Shutdown()
}

View File

@@ -1,205 +0,0 @@
package integration
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"strings"
"testing"
"github.com/juanfont/headscale/integration/hsic"
)
var errParseAuthPage = errors.New("failed to parse auth page")
type AuthWebFlowScenario struct {
*Scenario
}
func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
baseScenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
scenario := AuthWebFlowScenario{
Scenario: baseScenario,
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}
err = scenario.CreateHeadscaleEnv(spec, hsic.WithTestName("webauthping"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
namespaces map[string]int,
opts ...hsic.Option,
) error {
headscale, err := s.Headscale(opts...)
if err != nil {
return err
}
err = headscale.WaitForReady()
if err != nil {
return err
}
for namespaceName, clientCount := range namespaces {
log.Printf("creating namespace %s with %d clients", namespaceName, clientCount)
err = s.CreateNamespace(namespaceName)
if err != nil {
return err
}
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount)
if err != nil {
return err
}
err = s.runTailscaleUp(namespaceName, headscale.GetEndpoint())
if err != nil {
return err
}
}
return nil
}
func (s *AuthWebFlowScenario) runTailscaleUp(
namespaceStr, loginServer string,
) error {
log.Printf("running tailscale up for namespace %s", namespaceStr)
if namespace, ok := s.namespaces[namespaceStr]; ok {
for _, client := range namespace.Clients {
namespace.joinWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.joinWaitGroup.Done()
// TODO(juanfont): error handle this
loginURL, err := c.UpWithLoginURL(loginServer)
if err != nil {
log.Printf("failed to run tailscale up: %s", err)
}
err = s.runHeadscaleRegister(namespaceStr, loginURL)
if err != nil {
log.Printf("failed to register client: %s", err)
}
}(client)
err := client.WaitForReady()
if err != nil {
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
}
}
namespace.joinWaitGroup.Wait()
return nil
}
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
}
func (s *AuthWebFlowScenario) runHeadscaleRegister(namespaceStr string, loginURL *url.URL) error {
headscale, err := s.Headscale()
if err != nil {
return err
}
log.Printf("loginURL: %s", loginURL)
loginURL.Host = fmt.Sprintf("%s:8080", headscale.GetIP())
loginURL.Scheme = "http"
httpClient := &http.Client{}
ctx := context.Background()
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
resp, err := httpClient.Do(req)
if err != nil {
return err
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
defer resp.Body.Close()
// see api.go HTML template
codeSep := strings.Split(string(body), "</code>")
if len(codeSep) != 2 {
return errParseAuthPage
}
keySep := strings.Split(codeSep[0], "key ")
if len(keySep) != 2 {
return errParseAuthPage
}
key := keySep[1]
log.Printf("registering node %s", key)
if headscale, err := s.Headscale(); err == nil {
_, err = headscale.Execute(
[]string{"headscale", "-n", namespaceStr, "nodes", "register", "--key", key},
)
if err != nil {
log.Printf("failed to register node: %s", err)
return err
}
return nil
}
return fmt.Errorf("failed to find headscale: %w", errNoHeadscaleAvailable)
}

View File

@@ -1,391 +0,0 @@
package integration
import (
"encoding/json"
"sort"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
)
func executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error {
str, err := headscale.Execute(command)
if err != nil {
return err
}
err = json.Unmarshal([]byte(str), result)
if err != nil {
return err
}
return nil
}
func TestNamespaceCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
"namespace1": 0,
"namespace2": 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clins"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
var listNamespaces []v1.Namespace
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"namespaces",
"list",
"--output",
"json",
},
&listNamespaces,
)
assert.NoError(t, err)
result := []string{listNamespaces[0].Name, listNamespaces[1].Name}
sort.Strings(result)
assert.Equal(
t,
[]string{"namespace1", "namespace2"},
result,
)
_, err = headscale.Execute(
[]string{
"headscale",
"namespaces",
"rename",
"--output",
"json",
"namespace2",
"newname",
},
)
assert.NoError(t, err)
var listAfterRenameNamespaces []v1.Namespace
err = executeAndUnmarshal(headscale,
[]string{
"headscale",
"namespaces",
"list",
"--output",
"json",
},
&listAfterRenameNamespaces,
)
assert.NoError(t, err)
result = []string{listAfterRenameNamespaces[0].Name, listAfterRenameNamespaces[1].Name}
sort.Strings(result)
assert.Equal(
t,
[]string{"namespace1", "newname"},
result,
)
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestPreAuthKeyCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
namespace := "preauthkeyspace"
count := 3
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
namespace: 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipak"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
keys := make([]*v1.PreAuthKey, count)
assert.NoError(t, err)
for index := 0; index < count; index++ {
var preAuthKey v1.PreAuthKey
err := executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
"--tags",
"tag:test1,tag:test2",
},
&preAuthKey,
)
assert.NoError(t, err)
keys[index] = &preAuthKey
}
assert.Len(t, keys, 3)
var listedPreAuthKeys []v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"list",
"--output",
"json",
},
&listedPreAuthKeys,
)
assert.NoError(t, err)
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 4)
assert.Equal(
t,
[]string{keys[0].Id, keys[1].Id, keys[2].Id},
[]string{listedPreAuthKeys[1].Id, listedPreAuthKeys[2].Id, listedPreAuthKeys[3].Id},
)
assert.NotEmpty(t, listedPreAuthKeys[1].Key)
assert.NotEmpty(t, listedPreAuthKeys[2].Key)
assert.NotEmpty(t, listedPreAuthKeys[3].Key)
assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[2].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeys[3].Expiration.AsTime().After(time.Now()))
assert.True(
t,
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedPreAuthKeys[2].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
assert.True(
t,
listedPreAuthKeys[3].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
for index := range listedPreAuthKeys {
if index == 0 {
continue
}
assert.Equal(t, listedPreAuthKeys[index].AclTags, []string{"tag:test1", "tag:test2"})
}
// Test key expiry
_, err = headscale.Execute(
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"expire",
listedPreAuthKeys[1].Key,
},
)
assert.NoError(t, err)
var listedPreAuthKeysAfterExpire []v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"list",
"--output",
"json",
},
&listedPreAuthKeysAfterExpire,
)
assert.NoError(t, err)
assert.True(t, listedPreAuthKeysAfterExpire[1].Expiration.AsTime().Before(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[2].Expiration.AsTime().After(time.Now()))
assert.True(t, listedPreAuthKeysAfterExpire[3].Expiration.AsTime().After(time.Now()))
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
namespace := "pre-auth-key-without-exp-namespace"
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
namespace: 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipaknaexp"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
var preAuthKey v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"create",
"--reusable",
"--output",
"json",
},
&preAuthKey,
)
assert.NoError(t, err)
var listedPreAuthKeys []v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"list",
"--output",
"json",
},
&listedPreAuthKeys,
)
assert.NoError(t, err)
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 2)
assert.True(t, listedPreAuthKeys[1].Expiration.AsTime().After(time.Now()))
assert.True(
t,
listedPreAuthKeys[1].Expiration.AsTime().Before(time.Now().Add(time.Minute*70)),
)
err = scenario.Shutdown()
assert.NoError(t, err)
}
func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
namespace := "pre-auth-key-reus-ephm-namespace"
scenario, err := NewScenario()
assert.NoError(t, err)
spec := map[string]int{
namespace: 0,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clipakresueeph"))
assert.NoError(t, err)
headscale, err := scenario.Headscale()
assert.NoError(t, err)
var preAuthReusableKey v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"create",
"--reusable=true",
"--output",
"json",
},
&preAuthReusableKey,
)
assert.NoError(t, err)
var preAuthEphemeralKey v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"create",
"--ephemeral=true",
"--output",
"json",
},
&preAuthEphemeralKey,
)
assert.NoError(t, err)
assert.True(t, preAuthEphemeralKey.GetEphemeral())
assert.False(t, preAuthEphemeralKey.GetReusable())
var listedPreAuthKeys []v1.PreAuthKey
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"preauthkeys",
"--namespace",
namespace,
"list",
"--output",
"json",
},
&listedPreAuthKeys,
)
assert.NoError(t, err)
// There is one key created by "scenario.CreateHeadscaleEnv"
assert.Len(t, listedPreAuthKeys, 3)
err = scenario.Shutdown()
assert.NoError(t, err)
}

View File

@@ -1,19 +0,0 @@
package integration
import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)
type ControlServer interface {
Shutdown() error
Execute(command []string) (string, error)
GetHealthEndpoint() string
GetEndpoint() string
WaitForReady() error
CreateNamespace(namespace string) error
CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
GetCert() []byte
GetHostname() string
GetIP() string
}

View File

@@ -1,40 +0,0 @@
package dockertestutil
import (
"os"
"github.com/ory/dockertest/v3/docker"
)
func IsRunningInContainer() bool {
if _, err := os.Stat("/.dockerenv"); err != nil {
return false
}
return true
}
func DockerRestartPolicy(config *docker.HostConfig) {
// Set AutoRemove to true so that a stopped container goes away by itself immediately on error.
// When set to false, containers remain until the end of the integration test.
config.AutoRemove = false
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
func DockerAllowLocalIPv6(config *docker.HostConfig) {
if config.Sysctls == nil {
config.Sysctls = make(map[string]string, 1)
}
config.Sysctls["net.ipv6.conf.all.disable_ipv6"] = "0"
}
func DockerAllowNetworkAdministration(config *docker.HostConfig) {
config.CapAdd = append(config.CapAdd, "NET_ADMIN")
config.Mounts = append(config.Mounts, docker.HostMount{
Type: "bind",
Source: "/dev/net/tun",
Target: "/dev/net/tun",
})
}

View File

@@ -1,94 +0,0 @@
package dockertestutil
import (
"bytes"
"errors"
"fmt"
"time"
"github.com/ory/dockertest/v3"
)
const dockerExecuteTimeout = time.Second * 10
var (
ErrDockertestCommandFailed = errors.New("dockertest command failed")
ErrDockertestCommandTimeout = errors.New("dockertest command timed out")
)
type ExecuteCommandConfig struct {
timeout time.Duration
}
type ExecuteCommandOption func(*ExecuteCommandConfig) error
func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
return ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {
conf.timeout = timeout
return nil
})
}
func ExecuteCommand(
resource *dockertest.Resource,
cmd []string,
env []string,
options ...ExecuteCommandOption,
) (string, string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
execConfig := ExecuteCommandConfig{
timeout: dockerExecuteTimeout,
}
for _, opt := range options {
if err := opt(&execConfig); err != nil {
return "", "", fmt.Errorf("execute-command/options: %w", err)
}
}
type result struct {
exitCode int
err error
}
resultChan := make(chan result, 1)
// Run the long-running function in its own goroutine and pass its
// response back into our channel.
go func() {
exitCode, err := resource.Exec(
cmd,
dockertest.ExecOptions{
Env: append(env, "HEADSCALE_LOG_LEVEL=disabled"),
StdOut: &stdout,
StdErr: &stderr,
},
)
resultChan <- result{exitCode, err}
}()
// Listen on our channel AND a timeout channel - whichever happens first.
select {
case res := <-resultChan:
if res.err != nil {
return stdout.String(), stderr.String(), res.err
}
if res.exitCode != 0 {
// Uncomment for debugging
// log.Println("Command: ", cmd)
// log.Println("stdout: ", stdout.String())
// log.Println("stderr: ", stderr.String())
return stdout.String(), stderr.String(), ErrDockertestCommandFailed
}
return stdout.String(), stderr.String(), nil
case <-time.After(execConfig.timeout):
return stdout.String(), stderr.String(), ErrDockertestCommandTimeout
}
}
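
The helper above wraps resource.Exec in a goroutine and races it against a timeout channel via select. A minimal usage sketch follows; it is a hypothetical caller, not part of this diff, and assumes an already-started dockertest resource plus the import path github.com/juanfont/headscale/integration/dockertestutil used elsewhere in this changeset.

package main

import (
	"log"
	"time"

	"github.com/juanfont/headscale/integration/dockertestutil"
	"github.com/ory/dockertest/v3"
)

// runVersion is a hypothetical example: it executes "headscale version" inside
// the given container and overrides the default 10-second timeout with 30 seconds.
func runVersion(resource *dockertest.Resource) {
	stdout, stderr, err := dockertestutil.ExecuteCommand(
		resource,
		[]string{"headscale", "version"},
		[]string{}, // no extra environment variables
		dockertestutil.ExecuteCommandTimeout(30*time.Second),
	)
	if err != nil {
		log.Printf("command failed: %s (stderr: %s)", err, stderr)

		return
	}

	log.Printf("stdout: %s", stdout)
}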

View File

@@ -1,62 +0,0 @@
package dockertestutil
import (
"errors"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
var ErrContainerNotFound = errors.New("container not found")
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (*dockertest.Network, error) {
networks, err := pool.NetworksByName(name)
if err != nil || len(networks) == 0 {
if _, err := pool.CreateNetwork(name); err == nil {
// Create does not give us an updated version of the resource, so we need to
// get it again.
networks, err := pool.NetworksByName(name)
if err != nil {
return nil, err
}
return &networks[0], nil
}
}
return &networks[0], nil
}
func AddContainerToNetwork(
pool *dockertest.Pool,
network *dockertest.Network,
testContainer string,
) error {
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{
All: true,
Filters: map[string][]string{
"name": {testContainer},
},
})
if err != nil {
return err
}
err = pool.Client.ConnectNetwork(network.Network.ID, docker.NetworkConnectionOptions{
Container: containers[0].ID,
})
if err != nil {
return err
}
// TODO(kradalby): This doesn't work reliably, but calling the exact same functions
// seems to work fine...
// if container, ok := pool.ContainerByName("/" + testContainer); ok {
// err := container.ConnectToNetwork(network)
// if err != nil {
// return err
// }
// }
return nil
}
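
GetFirstOrCreateNetwork and AddContainerToNetwork are meant to be combined when wiring a test container into the shared integration network. Below is a minimal sketch, illustrative only and not part of this diff; the network name "headscale-test" and the function attachToTestNetwork are assumptions.

package main

import (
	"log"

	"github.com/juanfont/headscale/integration/dockertestutil"
	"github.com/ory/dockertest/v3"
)

// attachToTestNetwork is a hypothetical example: it looks up (or creates) the
// named network and then connects an existing container to it by name.
func attachToTestNetwork(pool *dockertest.Pool, containerName string) {
	// "headscale-test" is an assumed network name for illustration.
	network, err := dockertestutil.GetFirstOrCreateNetwork(pool, "headscale-test")
	if err != nil {
		log.Fatalf("failed to get or create network: %s", err)
	}

	if err := dockertestutil.AddContainerToNetwork(pool, network, containerName); err != nil {
		log.Fatalf("failed to add container %s to network: %s", containerName, err)
	}
}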

View File

@@ -1,345 +0,0 @@
package integration
import (
"fmt"
"strings"
"testing"
"time"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/rs/zerolog/log"
)
func TestPingAllByIP(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestPingAllByHostname(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
// Omit 1.16.2 (-1) because it does not have the FQDN field
"namespace3": len(TailscaleVersions) - 1,
"namespace4": len(TailscaleVersions) - 1,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
allHostnames, err := scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
success := 0
for _, client := range allClients {
for _, hostname := range allHostnames {
err := client.Ping(hostname)
if err != nil {
t.Errorf("failed to ping %s from %s: %s", hostname, client.Hostname(), err)
} else {
success++
}
}
}
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients))
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestTaildrop(t *testing.T) {
IntegrationSkip(t)
retry := func(times int, sleepInverval time.Duration, doWork func() error) error {
var err error
for attempts := 0; attempts < times; attempts++ {
err = doWork()
if err == nil {
return nil
}
time.Sleep(sleepInverval)
}
return err
}
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
// Omit 1.16.2 (-1) because it does not have the FQDN field
"taildrop": len(TailscaleVersions) - 1,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
// This will essentially fetch and cache all the FQDNs
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
for _, client := range allClients {
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())}
if _, _, err := client.Execute(command); err != nil {
t.Errorf("failed to create taildrop file on %s, err: %s", client.Hostname(), err)
}
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
// It is safe to ignore this error as we handled it when caching it
peerFQDN, _ := peer.FQDN()
t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
command := []string{
"tailscale", "file", "cp",
fmt.Sprintf("/tmp/file_from_%s", client.Hostname()),
fmt.Sprintf("%s:", peerFQDN),
}
err := retry(10, 1*time.Second, func() error {
t.Logf(
"Sending file from %s to %s\n",
client.Hostname(),
peer.Hostname(),
)
_, _, err := client.Execute(command)
return err
})
if err != nil {
t.Errorf(
"failed to send taildrop file on %s, err: %s",
client.Hostname(),
err,
)
}
})
}
}
for _, client := range allClients {
command := []string{
"tailscale", "file",
"get",
"/tmp/",
}
if _, _, err := client.Execute(command); err != nil {
t.Errorf("failed to get taildrop file on %s, err: %s", client.Hostname(), err)
}
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
command := []string{
"ls",
fmt.Sprintf("/tmp/file_from_%s", peer.Hostname()),
}
log.Printf(
"Checking file in %s from %s\n",
client.Hostname(),
peer.Hostname(),
)
result, _, err := client.Execute(command)
if err != nil {
t.Errorf("failed to execute command to ls taildrop: %s", err)
}
log.Printf("Result for %s: %s\n", peer.Hostname(), result)
if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result {
t.Errorf(
"taildrop result is not correct %s, wanted %s",
result,
fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()),
)
}
})
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestResolveMagicDNS(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
// Omit 1.16.2 (-1) because it does not have the FQDN field
"magicdns1": len(TailscaleVersions) - 1,
"magicdns2": len(TailscaleVersions) - 1,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("magicdns"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
// Poor man's cache
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
_, err = scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get IPs: %s", err)
}
for _, client := range allClients {
for _, peer := range allClients {
// It is safe to ignore this error as we handled it when caching it
peerFQDN, _ := peer.FQDN()
command := []string{
"tailscale",
"ip", peerFQDN,
}
result, _, err := client.Execute(command)
if err != nil {
t.Errorf(
"failed to execute resolve/ip command %s from %s: %s",
peerFQDN,
client.Hostname(),
err,
)
}
ips, err := peer.IPs()
if err != nil {
t.Errorf(
"failed to get ips for %s: %s",
peer.Hostname(),
err,
)
}
for _, ip := range ips {
if !strings.Contains(result, ip.String()) {
t.Errorf("ip %s is not found in \n%s\n", ip.String(), result)
}
}
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}

View File

@@ -1,97 +0,0 @@
package hsic
// const (
// defaultEphemeralNodeInactivityTimeout = time.Second * 30
// defaultNodeUpdateCheckInterval = time.Second * 10
// )
// TODO(kradalby): This approach doesn't work because we cannot
// serialise our config object to YAML or JSON.
// func DefaultConfig() headscale.Config {
// derpMap, _ := url.Parse("https://controlplane.tailscale.com/derpmap/default")
//
// config := headscale.Config{
// Log: headscale.LogConfig{
// Level: zerolog.TraceLevel,
// },
// ACL: headscale.GetACLConfig(),
// DBtype: "sqlite3",
// EphemeralNodeInactivityTimeout: defaultEphemeralNodeInactivityTimeout,
// NodeUpdateCheckInterval: defaultNodeUpdateCheckInterval,
// IPPrefixes: []netip.Prefix{
// netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
// netip.MustParsePrefix("100.64.0.0/10"),
// },
// DNSConfig: &tailcfg.DNSConfig{
// Proxied: true,
// Nameservers: []netip.Addr{
// netip.MustParseAddr("127.0.0.11"),
// netip.MustParseAddr("1.1.1.1"),
// },
// Resolvers: []*dnstype.Resolver{
// {
// Addr: "127.0.0.11",
// },
// {
// Addr: "1.1.1.1",
// },
// },
// },
// BaseDomain: "headscale.net",
//
// DBpath: "/tmp/integration_test_db.sqlite3",
//
// PrivateKeyPath: "/tmp/integration_private.key",
// NoisePrivateKeyPath: "/tmp/noise_integration_private.key",
// Addr: "0.0.0.0:8080",
// MetricsAddr: "127.0.0.1:9090",
// ServerURL: "http://headscale:8080",
//
// DERP: headscale.DERPConfig{
// URLs: []url.URL{
// *derpMap,
// },
// AutoUpdate: false,
// UpdateFrequency: 1 * time.Minute,
// },
// }
//
// return config
// }
// TODO: Reuse the actual configuration object above.
func DefaultConfigYAML() string {
yaml := `
log:
level: trace
acl_policy_path: ""
db_type: sqlite3
db_path: /tmp/integration_test_db.sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
dns_config:
base_domain: headscale.net
magic_dns: true
domains: []
nameservers:
- 127.0.0.11
- 1.1.1.1
private_key_path: /tmp/private.key
noise:
private_key_path: /tmp/noise_private.key
listen_addr: 0.0.0.0:8080
metrics_listen_addr: 127.0.0.1:9090
server_url: http://headscale:8080
derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
auto_update_enabled: false
update_frequency: 1m
`
return yaml
}
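
Because the config object cannot be serialised directly (per the comment above), the default configuration is kept as a raw YAML string and later written into the container. A minimal, hypothetical sanity check using gopkg.in/yaml.v3 (already listed in go.mod) could parse that string back into a generic map; the function name checkDefaultConfig is an assumption for illustration.

package main

import (
	"log"

	"gopkg.in/yaml.v3"
)

// checkDefaultConfig is a hypothetical helper: it parses the YAML string
// returned by DefaultConfigYAML into a generic map to confirm it is valid YAML.
func checkDefaultConfig(defaultConfigYAML string) map[string]interface{} {
	var cfg map[string]interface{}
	if err := yaml.Unmarshal([]byte(defaultConfigYAML), &cfg); err != nil {
		log.Fatalf("default config is not valid YAML: %s", err)
	}

	return cfg
}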

View File

@@ -1,458 +0,0 @@
package hsic
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"log"
"math/big"
"net"
"net/http"
"time"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/ory/dockertest/v3"
)
const (
hsicHashLength = 6
dockerContextPath = "../."
aclPolicyPath = "/etc/headscale/acl.hujson"
tlsCertPath = "/etc/headscale/tls.cert"
tlsKeyPath = "/etc/headscale/tls.key"
headscaleDefaultPort = 8080
)
var errHeadscaleStatusCodeNotOk = errors.New("headscale status code not ok")
type HeadscaleInContainer struct {
hostname string
pool *dockertest.Pool
container *dockertest.Resource
network *dockertest.Network
// optional config
port int
aclPolicy *headscale.ACLPolicy
env []string
tlsCert []byte
tlsKey []byte
}
type Option = func(c *HeadscaleInContainer)
func WithACLPolicy(acl *headscale.ACLPolicy) Option {
return func(hsic *HeadscaleInContainer) {
// TODO(kradalby): Move somewhere appropriate
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_ACL_POLICY_PATH=%s", aclPolicyPath))
hsic.aclPolicy = acl
}
}
func WithTLS() Option {
return func(hsic *HeadscaleInContainer) {
cert, key, err := createCertificate()
if err != nil {
log.Fatalf("failed to create certificates for headscale test: %s", err)
}
// TODO(kradalby): Move somewhere appropriate
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_CERT_PATH=%s", tlsCertPath))
hsic.env = append(hsic.env, fmt.Sprintf("HEADSCALE_TLS_KEY_PATH=%s", tlsKeyPath))
hsic.tlsCert = cert
hsic.tlsKey = key
}
}
func WithConfigEnv(configEnv map[string]string) Option {
return func(hsic *HeadscaleInContainer) {
for key, value := range configEnv {
hsic.env = append(hsic.env, fmt.Sprintf("%s=%s", key, value))
}
}
}
func WithPort(port int) Option {
return func(hsic *HeadscaleInContainer) {
hsic.port = port
}
}
func WithTestName(testName string) Option {
return func(hsic *HeadscaleInContainer) {
hash, _ := headscale.GenerateRandomStringDNSSafe(hsicHashLength)
hostname := fmt.Sprintf("hs-%s-%s", testName, hash)
hsic.hostname = hostname
}
}
func WithHostnameAsServerURL() Option {
return func(hsic *HeadscaleInContainer) {
hsic.env = append(
hsic.env,
fmt.Sprintf("HEADSCALE_SERVER_URL=http://%s:%d",
hsic.GetHostname(),
hsic.port,
))
}
}
func New(
pool *dockertest.Pool,
network *dockertest.Network,
opts ...Option,
) (*HeadscaleInContainer, error) {
hash, err := headscale.GenerateRandomStringDNSSafe(hsicHashLength)
if err != nil {
return nil, err
}
hostname := fmt.Sprintf("hs-%s", hash)
hsic := &HeadscaleInContainer{
hostname: hostname,
port: headscaleDefaultPort,
pool: pool,
network: network,
}
for _, opt := range opts {
opt(hsic)
}
log.Println("NAME: ", hsic.hostname)
portProto := fmt.Sprintf("%d/tcp", hsic.port)
headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.debug",
ContextDir: dockerContextPath,
}
runOptions := &dockertest.RunOptions{
Name: hsic.hostname,
ExposedPorts: []string{portProto},
Networks: []*dockertest.Network{network},
// Cmd: []string{"headscale", "serve"},
// TODO(kradalby): Get rid of this hack, we currently need to give ourselves some
// time to inject the headscale configuration further down.
Entrypoint: []string{"/bin/bash", "-c", "/bin/sleep 3 ; headscale serve"},
Env: hsic.env,
}
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
err = pool.RemoveContainerByName(hsic.hostname)
if err != nil {
return nil, err
}
container, err := pool.BuildAndRunWithBuildOptions(
headscaleBuildOptions,
runOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
)
if err != nil {
return nil, fmt.Errorf("could not start headscale container: %w", err)
}
log.Printf("Created %s container\n", hsic.hostname)
hsic.container = container
err = hsic.WriteFile("/etc/headscale/config.yaml", []byte(DefaultConfigYAML()))
if err != nil {
return nil, fmt.Errorf("failed to write headscale config to container: %w", err)
}
if hsic.aclPolicy != nil {
data, err := json.Marshal(hsic.aclPolicy)
if err != nil {
return nil, fmt.Errorf("failed to marshal ACL Policy to JSON: %w", err)
}
err = hsic.WriteFile(aclPolicyPath, data)
if err != nil {
return nil, fmt.Errorf("failed to write ACL policy to container: %w", err)
}
}
if hsic.hasTLS() {
err = hsic.WriteFile(tlsCertPath, hsic.tlsCert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
err = hsic.WriteFile(tlsKeyPath, hsic.tlsKey)
if err != nil {
return nil, fmt.Errorf("failed to write TLS key to container: %w", err)
}
}
return hsic, nil
}
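// newForIllustration is a minimal sketch, not used by the test suite, showing how
// the options above are typically combined when constructing a headscale container.
// The test name "example" is arbitrary; the config env entry mirrors the one used
// by the SSH tests in this change set.
func newForIllustration(
	pool *dockertest.Pool,
	network *dockertest.Network,
) (*HeadscaleInContainer, error) {
	hs, err := New(
		pool,
		network,
		WithTestName("example"),
		WithTLS(),
		WithConfigEnv(map[string]string{
			"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
		}),
	)
	if err != nil {
		return nil, err
	}

	// Block until the /health endpoint reports OK before handing the instance on.
	if err := hs.WaitForReady(); err != nil {
		return nil, err
	}

	return hs, nil
}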
func (t *HeadscaleInContainer) hasTLS() bool {
return len(t.tlsCert) != 0 && len(t.tlsKey) != 0
}
func (t *HeadscaleInContainer) Shutdown() error {
return t.pool.Purge(t.container)
}
func (t *HeadscaleInContainer) Execute(
command []string,
) (string, error) {
stdout, stderr, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
log.Printf("command stderr: %s\n", stderr)
if stdout != "" {
log.Printf("command stdout: %s\n", stdout)
}
return "", err
}
return stdout, nil
}
func (t *HeadscaleInContainer) GetIP() string {
return t.container.GetIPInNetwork(t.network)
}
func (t *HeadscaleInContainer) GetPort() string {
return fmt.Sprintf("%d", t.port)
}
func (t *HeadscaleInContainer) GetHealthEndpoint() string {
return fmt.Sprintf("%s/health", t.GetEndpoint())
}
func (t *HeadscaleInContainer) GetEndpoint() string {
hostEndpoint := fmt.Sprintf("%s:%d",
t.GetIP(),
t.port)
if t.hasTLS() {
return fmt.Sprintf("https://%s", hostEndpoint)
}
return fmt.Sprintf("http://%s", hostEndpoint)
}
func (t *HeadscaleInContainer) GetCert() []byte {
return t.tlsCert
}
func (t *HeadscaleInContainer) GetHostname() string {
return t.hostname
}
func (t *HeadscaleInContainer) WaitForReady() error {
url := t.GetHealthEndpoint()
log.Printf("waiting for headscale to be ready at %s", url)
client := &http.Client{}
if t.hasTLS() {
insecureTransport := http.DefaultTransport.(*http.Transport).Clone() //nolint
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} //nolint
client = &http.Client{Transport: insecureTransport}
}
return t.pool.Retry(func() error {
resp, err := client.Get(url) //nolint
if err != nil {
return fmt.Errorf("headscale is not ready: %w", err)
}
if resp.StatusCode != http.StatusOK {
return errHeadscaleStatusCodeNotOk
}
return nil
})
}
func (t *HeadscaleInContainer) CreateNamespace(
namespace string,
) error {
command := []string{"headscale", "namespaces", "create", namespace}
_, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return err
}
return nil
}
func (t *HeadscaleInContainer) CreateAuthKey(
namespace string,
) (*v1.PreAuthKey, error) {
command := []string{
"headscale",
"--namespace",
namespace,
"preauthkeys",
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return nil, fmt.Errorf("failed to execute create auth key command: %w", err)
}
var preAuthKey v1.PreAuthKey
err = json.Unmarshal([]byte(result), &preAuthKey)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal auth key: %w", err)
}
return &preAuthKey, nil
}
func (t *HeadscaleInContainer) ListMachinesInNamespace(
namespace string,
) ([]*v1.Machine, error) {
command := []string{"headscale", "--namespace", namespace, "nodes", "list", "--output", "json"}
result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
return nil, fmt.Errorf("failed to execute list node command: %w", err)
}
var nodes []*v1.Machine
err = json.Unmarshal([]byte(result), &nodes)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal nodes: %w", err)
}
return nodes, nil
}
func (t *HeadscaleInContainer) WriteFile(path string, data []byte) error {
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
}
// nolint
func createCertificate() ([]byte, []byte, error) {
// From:
// https://shaneutt.com/blog/golang-ca-and-signed-cert-go/
ca := &x509.Certificate{
SerialNumber: big.NewInt(2019),
Subject: pkix.Name{
Organization: []string{"Headscale testing INC"},
Country: []string{"NL"},
Locality: []string{"Leiden"},
},
NotBefore: time.Now(),
NotAfter: time.Now().Add(30 * time.Minute),
IsCA: true,
ExtKeyUsage: []x509.ExtKeyUsage{
x509.ExtKeyUsageClientAuth,
x509.ExtKeyUsageServerAuth,
},
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
}
caPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, err
}
cert := &x509.Certificate{
SerialNumber: big.NewInt(1658),
Subject: pkix.Name{
Organization: []string{"Headscale testing INC"},
Country: []string{"NL"},
Locality: []string{"Leiden"},
},
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback},
NotBefore: time.Now(),
NotAfter: time.Now().Add(30 * time.Minute),
SubjectKeyId: []byte{1, 2, 3, 4, 6},
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
KeyUsage: x509.KeyUsageDigitalSignature,
}
certPrivKey, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return nil, nil, err
}
certBytes, err := x509.CreateCertificate(
rand.Reader,
cert,
ca,
&certPrivKey.PublicKey,
caPrivKey,
)
if err != nil {
return nil, nil, err
}
certPEM := new(bytes.Buffer)
err = pem.Encode(certPEM, &pem.Block{
Type: "CERTIFICATE",
Bytes: certBytes,
})
if err != nil {
return nil, nil, err
}
certPrivKeyPEM := new(bytes.Buffer)
err = pem.Encode(certPrivKeyPEM, &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(certPrivKey),
})
if err != nil {
return nil, nil, err
}
return certPEM.Bytes(), certPrivKeyPEM.Bytes(), nil
}

View File

@@ -1,74 +0,0 @@
package integrationutil
import (
"archive/tar"
"bytes"
"fmt"
"io"
"path/filepath"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
)
func WriteFileToContainer(
pool *dockertest.Pool,
container *dockertest.Resource,
path string,
data []byte,
) error {
dirPath, fileName := filepath.Split(path)
file := bytes.NewReader(data)
buf := bytes.NewBuffer([]byte{})
tarWriter := tar.NewWriter(buf)
header := &tar.Header{
Name: fileName,
Size: file.Size(),
// Mode: int64(stat.Mode()),
// ModTime: stat.ModTime(),
}
err := tarWriter.WriteHeader(header)
if err != nil {
return fmt.Errorf("failed write file header to tar: %w", err)
}
_, err = io.Copy(tarWriter, file)
if err != nil {
return fmt.Errorf("failed to copy file to tar: %w", err)
}
err = tarWriter.Close()
if err != nil {
return fmt.Errorf("failed to close tar: %w", err)
}
// Ensure the directory is present inside the container
_, _, err = dockertestutil.ExecuteCommand(
container,
[]string{"mkdir", "-p", dirPath},
[]string{},
)
if err != nil {
return fmt.Errorf("failed to ensure directory: %w", err)
}
err = pool.Client.UploadToContainer(
container.Container.ID,
docker.UploadToContainerOptions{
NoOverwriteDirNonDir: false,
Path: dirPath,
InputStream: bytes.NewReader(buf.Bytes()),
},
)
if err != nil {
return err
}
return nil
}

View File

@@ -1,470 +0,0 @@
package integration
import (
"errors"
"fmt"
"log"
"net/netip"
"os"
"sync"
"time"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/ory/dockertest/v3"
"github.com/puzpuzpuz/xsync/v2"
)
const (
scenarioHashLength = 6
maxWait = 60 * time.Second
)
var (
errNoHeadscaleAvailable = errors.New("no headscale available")
errNoNamespaceAvailable = errors.New("no namespace available")
// Tailscale started adding TS2021 support in CapabilityVersion>=28 (v1.24.0), but
// proper support in Headscale was only added for CapabilityVersion>=39 clients (v1.30.0).
tailscaleVersions2021 = []string{
"head",
"unstable",
"1.32.1",
"1.30.2",
}
tailscaleVersions2019 = []string{
"1.28.0",
"1.26.2",
"1.24.2",
"1.22.2",
"1.20.4",
"1.18.2",
"1.16.2",
}
// tailscaleVersionsUnavailable = []string{
// // These versions seem to fail when fetching from apt.
// "1.14.6",
// "1.12.4",
// "1.10.2",
// "1.8.7",
// }.
TailscaleVersions = append(
tailscaleVersions2021,
tailscaleVersions2019...,
)
)
type Namespace struct {
Clients map[string]TailscaleClient
createWaitGroup sync.WaitGroup
joinWaitGroup sync.WaitGroup
syncWaitGroup sync.WaitGroup
}
// TODO(kradalby): make control server configurable, test correctness with Tailscale SaaS.
type Scenario struct {
// TODO(kradalby): support multiple headscales later; currently only
// one is used.
controlServers *xsync.MapOf[string, ControlServer]
namespaces map[string]*Namespace
pool *dockertest.Pool
network *dockertest.Network
headscaleLock sync.Mutex
}
func NewScenario() (*Scenario, error) {
hash, err := headscale.GenerateRandomStringDNSSafe(scenarioHashLength)
if err != nil {
return nil, err
}
pool, err := dockertest.NewPool("")
if err != nil {
return nil, fmt.Errorf("could not connect to docker: %w", err)
}
pool.MaxWait = maxWait
networkName := fmt.Sprintf("hs-%s", hash)
if overrideNetworkName := os.Getenv("HEADSCALE_TEST_NETWORK_NAME"); overrideNetworkName != "" {
networkName = overrideNetworkName
}
network, err := dockertestutil.GetFirstOrCreateNetwork(pool, networkName)
if err != nil {
return nil, fmt.Errorf("failed to create or get network: %w", err)
}
// We run the test suite in a Docker container that calls a couple of endpoints for
// readiness checks; this ensures that we can run the tests with individual networks
// and have the client reach the different containers.
err = dockertestutil.AddContainerToNetwork(pool, network, "headscale-test-suite")
if err != nil {
return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
}
return &Scenario{
controlServers: xsync.NewMapOf[ControlServer](),
namespaces: make(map[string]*Namespace),
pool: pool,
network: network,
}, nil
}
func (s *Scenario) Shutdown() error {
s.controlServers.Range(func(_ string, control ControlServer) bool {
err := control.Shutdown()
if err != nil {
log.Printf(
"Failed to shut down control: %s",
fmt.Errorf("failed to tear down control: %w", err),
)
}
return true
})
for namespaceName, namespace := range s.namespaces {
for _, client := range namespace.Clients {
log.Printf("removing client %s in namespace %s", client.Hostname(), namespaceName)
err := client.Shutdown()
if err != nil {
return fmt.Errorf("failed to tear down client: %w", err)
}
}
}
if err := s.pool.RemoveNetwork(s.network); err != nil {
return fmt.Errorf("failed to remove network: %w", err)
}
// TODO(kradalby): This seems redundant to the previous call.
// if err := s.network.Close(); err != nil {
// return fmt.Errorf("failed to tear down network: %w", err)
// }
return nil
}
func (s *Scenario) Namespaces() []string {
namespaces := make([]string, 0)
for namespace := range s.namespaces {
namespaces = append(namespaces, namespace)
}
return namespaces
}
/// Headscale related stuff
// Note: These functions assume that there is a _single_ headscale instance for now
// TODO(kradalby): make port and headscale configurable, support multiple instances?
func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
s.headscaleLock.Lock()
defer s.headscaleLock.Unlock()
if headscale, ok := s.controlServers.Load("headscale"); ok {
return headscale, nil
}
headscale, err := hsic.New(s.pool, s.network, opts...)
if err != nil {
return nil, fmt.Errorf("failed to create headscale container: %w", err)
}
err = headscale.WaitForReady()
if err != nil {
return nil, fmt.Errorf("failed reach headscale container: %w", err)
}
s.controlServers.Store("headscale", headscale)
return headscale, nil
}
func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
if headscale, err := s.Headscale(); err == nil {
key, err := headscale.CreateAuthKey(namespace)
if err != nil {
return nil, fmt.Errorf("failed to create namespace: %w", err)
}
return key, nil
}
return nil, fmt.Errorf("failed to create namespace: %w", errNoHeadscaleAvailable)
}
func (s *Scenario) CreateNamespace(namespace string) error {
if headscale, err := s.Headscale(); err == nil {
err := headscale.CreateNamespace(namespace)
if err != nil {
return fmt.Errorf("failed to create namespace: %w", err)
}
s.namespaces[namespace] = &Namespace{
Clients: make(map[string]TailscaleClient),
}
return nil
}
return fmt.Errorf("failed to create namespace: %w", errNoHeadscaleAvailable)
}
/// Client related stuff
func (s *Scenario) CreateTailscaleNodesInNamespace(
namespaceStr string,
requestedVersion string,
count int,
opts ...tsic.Option,
) error {
if namespace, ok := s.namespaces[namespaceStr]; ok {
for i := 0; i < count; i++ {
version := requestedVersion
if requestedVersion == "all" {
version = TailscaleVersions[i%len(TailscaleVersions)]
}
headscale, err := s.Headscale()
if err != nil {
return fmt.Errorf("failed to create tailscale node: %w", err)
}
cert := headscale.GetCert()
hostname := headscale.GetHostname()
namespace.createWaitGroup.Add(1)
opts = append(opts,
tsic.WithHeadscaleTLS(cert),
tsic.WithHeadscaleName(hostname),
)
go func() {
defer namespace.createWaitGroup.Done()
// TODO(kradalby): error handle this
tsClient, err := tsic.New(
s.pool,
version,
s.network,
opts...,
)
if err != nil {
// return fmt.Errorf("failed to add tailscale node: %w", err)
log.Printf("failed to create tailscale node: %s", err)
}
err = tsClient.WaitForReady()
if err != nil {
// return fmt.Errorf("failed to add tailscale node: %w", err)
log.Printf("failed to wait for tailscaled: %s", err)
}
namespace.Clients[tsClient.Hostname()] = tsClient
}()
}
namespace.createWaitGroup.Wait()
return nil
}
return fmt.Errorf("failed to add tailscale node: %w", errNoNamespaceAvailable)
}
func (s *Scenario) RunTailscaleUp(
namespaceStr, loginServer, authKey string,
) error {
if namespace, ok := s.namespaces[namespaceStr]; ok {
for _, client := range namespace.Clients {
namespace.joinWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.joinWaitGroup.Done()
// TODO(kradalby): error handle this
_ = c.Up(loginServer, authKey)
}(client)
err := client.WaitForReady()
if err != nil {
log.Printf("error waiting for client %s to be ready: %s", client.Hostname(), err)
}
}
namespace.joinWaitGroup.Wait()
return nil
}
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
}
func (s *Scenario) CountTailscale() int {
count := 0
for _, namespace := range s.namespaces {
count += len(namespace.Clients)
}
return count
}
func (s *Scenario) WaitForTailscaleSync() error {
tsCount := s.CountTailscale()
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)
go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()
// TODO(kradalby): error handle this
_ = c.WaitForPeers(tsCount)
}(client)
}
namespace.syncWaitGroup.Wait()
}
return nil
}
// CreateHeadscaleEnv is a convenient method returning a set-up Headscale
// test environment with nodes of all versions, joined to the server for the
// given namespaces.
func (s *Scenario) CreateHeadscaleEnv(
namespaces map[string]int,
tsOpts []tsic.Option,
opts ...hsic.Option,
) error {
headscale, err := s.Headscale(opts...)
if err != nil {
return err
}
for namespaceName, clientCount := range namespaces {
err = s.CreateNamespace(namespaceName)
if err != nil {
return err
}
err = s.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount, tsOpts...)
if err != nil {
return err
}
key, err := s.CreatePreAuthKey(namespaceName)
if err != nil {
return err
}
err = s.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
return err
}
}
return nil
}
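// exampleScenarioFlow is an illustrative sketch, not wired into any test, of how the
// helpers above are combined: a spec of namespaces with client counts, a single
// headscale instance, and a wait for full peer sync. The namespace names and counts
// are arbitrary.
func exampleScenarioFlow() error {
	scenario, err := NewScenario()
	if err != nil {
		return err
	}

	spec := map[string]int{
		"namespace1": 2,
		"namespace2": 2,
	}

	// Starts headscale, creates the namespaces, boots the tailscale nodes and
	// joins them with a pre-auth key.
	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("example"))
	if err != nil {
		return err
	}

	// Wait until every client sees all other clients as peers.
	if err := scenario.WaitForTailscaleSync(); err != nil {
		return err
	}

	return scenario.Shutdown()
}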
func (s *Scenario) GetIPs(namespace string) ([]netip.Addr, error) {
var ips []netip.Addr
if ns, ok := s.namespaces[namespace]; ok {
for _, client := range ns.Clients {
clientIps, err := client.IPs()
if err != nil {
return ips, fmt.Errorf("failed to get ips: %w", err)
}
ips = append(ips, clientIps...)
}
return ips, nil
}
return ips, fmt.Errorf("failed to get ips: %w", errNoNamespaceAvailable)
}
func (s *Scenario) GetClients(namespace string) ([]TailscaleClient, error) {
var clients []TailscaleClient
if ns, ok := s.namespaces[namespace]; ok {
for _, client := range ns.Clients {
clients = append(clients, client)
}
return clients, nil
}
return clients, fmt.Errorf("failed to get clients: %w", errNoNamespaceAvailable)
}
func (s *Scenario) ListTailscaleClients(namespaces ...string) ([]TailscaleClient, error) {
var allClients []TailscaleClient
if len(namespaces) == 0 {
namespaces = s.Namespaces()
}
for _, namespace := range namespaces {
clients, err := s.GetClients(namespace)
if err != nil {
return nil, err
}
allClients = append(allClients, clients...)
}
return allClients, nil
}
func (s *Scenario) ListTailscaleClientsIPs(namespaces ...string) ([]netip.Addr, error) {
var allIps []netip.Addr
if len(namespaces) == 0 {
namespaces = s.Namespaces()
}
for _, namespace := range namespaces {
ips, err := s.GetIPs(namespace)
if err != nil {
return nil, err
}
allIps = append(allIps, ips...)
}
return allIps, nil
}
func (s *Scenario) ListTailscaleClientsFQDNs(namespaces ...string) ([]string, error) {
allFQDNs := make([]string, 0)
clients, err := s.ListTailscaleClients(namespaces...)
if err != nil {
return nil, err
}
for _, client := range clients {
fqdn, err := client.FQDN()
if err != nil {
return nil, err
}
allFQDNs = append(allFQDNs, fqdn)
}
return allFQDNs, nil
}

View File

@@ -1,189 +0,0 @@
package integration
import (
"testing"
"github.com/juanfont/headscale/integration/dockertestutil"
)
// This file is intended to "test the test framework"; by proxy it will also test
// some Headscale/Tailscale stuff, but mostly in very simple ways.
func IntegrationSkip(t *testing.T) {
t.Helper()
if !dockertestutil.IsRunningInContainer() {
t.Skip("not running in docker, skipping")
}
if testing.Short() {
t.Skip("skipping integration tests due to short flag")
}
}
func TestHeadscale(t *testing.T) {
IntegrationSkip(t)
var err error
namespace := "test-space"
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
t.Run("start-headscale", func(t *testing.T) {
headscale, err := scenario.Headscale()
if err != nil {
t.Errorf("failed to create start headcale: %s", err)
}
err = headscale.WaitForReady()
if err != nil {
t.Errorf("headscale failed to become ready: %s", err)
}
})
t.Run("create-namespace", func(t *testing.T) {
err := scenario.CreateNamespace(namespace)
if err != nil {
t.Errorf("failed to create namespace: %s", err)
}
if _, ok := scenario.namespaces[namespace]; !ok {
t.Errorf("namespace is not in scenario")
}
})
t.Run("create-auth-key", func(t *testing.T) {
_, err := scenario.CreatePreAuthKey(namespace)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}
})
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestCreateTailscale(t *testing.T) {
IntegrationSkip(t)
namespace := "only-create-containers"
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
scenario.namespaces[namespace] = &Namespace{
Clients: make(map[string]TailscaleClient),
}
t.Run("create-tailscale", func(t *testing.T) {
err := scenario.CreateTailscaleNodesInNamespace(namespace, "all", 3)
if err != nil {
t.Errorf("failed to add tailscale nodes: %s", err)
}
if clients := len(scenario.namespaces[namespace].Clients); clients != 3 {
t.Errorf("wrong number of tailscale clients: %d != %d", clients, 3)
}
// TODO(kradalby): Test "all" version logic
})
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
IntegrationSkip(t)
var err error
namespace := "join-node-test"
count := 1
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
t.Run("start-headscale", func(t *testing.T) {
headscale, err := scenario.Headscale()
if err != nil {
t.Errorf("failed to create start headcale: %s", err)
}
err = headscale.WaitForReady()
if err != nil {
t.Errorf("headscale failed to become ready: %s", err)
}
})
t.Run("create-namespace", func(t *testing.T) {
err := scenario.CreateNamespace(namespace)
if err != nil {
t.Errorf("failed to create namespace: %s", err)
}
if _, ok := scenario.namespaces[namespace]; !ok {
t.Errorf("namespace is not in scenario")
}
})
t.Run("create-tailscale", func(t *testing.T) {
err := scenario.CreateTailscaleNodesInNamespace(namespace, "1.30.2", count)
if err != nil {
t.Errorf("failed to add tailscale nodes: %s", err)
}
if clients := len(scenario.namespaces[namespace].Clients); clients != count {
t.Errorf("wrong number of tailscale clients: %d != %d", clients, count)
}
})
t.Run("join-headscale", func(t *testing.T) {
key, err := scenario.CreatePreAuthKey(namespace)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}
headscale, err := scenario.Headscale()
if err != nil {
t.Errorf("failed to create start headcale: %s", err)
}
err = scenario.RunTailscaleUp(
namespace,
headscale.GetEndpoint(),
key.GetKey(),
)
if err != nil {
t.Errorf("failed to login: %s", err)
}
})
t.Run("get-ips", func(t *testing.T) {
ips, err := scenario.GetIPs(namespace)
if err != nil {
t.Errorf("failed to get tailscale ips: %s", err)
}
if len(ips) != count*2 {
t.Errorf("got the wrong amount of tailscale ips, %d != %d", len(ips), count*2)
}
})
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}

View File

@@ -1,519 +0,0 @@
package integration
import (
"fmt"
"strings"
"testing"
"time"
"github.com/juanfont/headscale"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
)
var retry = func(times int, sleepInterval time.Duration,
doWork func() (string, string, error),
) (string, string, error) {
var result string
var stderr string
var err error
for attempts := 0; attempts < times; attempts++ {
// Assign to the outer err so the last error is returned after the loop;
// using := here would shadow it and make the function always return nil.
var tempResult, tempStderr string
tempResult, tempStderr, err = doWork()
result += tempResult
stderr += tempStderr
if err == nil {
return result, stderr, nil
}
// If we get a permission denied error, we can fail immediately
// since that is something we won't recover from by retrying.
if err != nil && strings.Contains(stderr, "Permission denied (tailscale)") {
return result, stderr, err
}
time.Sleep(sleepInterval)
}
return result, stderr, err
}
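// retryExecute is a minimal sketch of how the retry helper above is meant to be
// used; doSSH further down in this file is the real call site. The plain
// "hostname" command is arbitrary.
func retryExecute(client TailscaleClient) (string, string, error) {
	return retry(10, 1*time.Second, func() (string, string, error) {
		return client.Execute([]string{"hostname"})
	})
}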
func TestSSHOneNamespaceAllToAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions) - 5,
}
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{tsic.WithSSH()},
hsic.WithACLPolicy(
&headscale.ACLPolicy{
Groups: map[string][]string{
"group:integration-test": {"namespace1"},
},
ACLs: []headscale.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
SSHs: []headscale.SSH{
{
Action: "accept",
Sources: []string{"group:integration-test"},
Destinations: []string{"group:integration-test"},
Users: []string{"ssh-it-user"},
},
},
},
),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
}),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
for _, client := range allClients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestSSHMultipleNamespacesAllToAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions) - 5,
"namespace2": len(TailscaleVersions) - 5,
}
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{tsic.WithSSH()},
hsic.WithACLPolicy(
&headscale.ACLPolicy{
Groups: map[string][]string{
"group:integration-test": {"namespace1", "namespace2"},
},
ACLs: []headscale.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
SSHs: []headscale.SSH{
{
Action: "accept",
Sources: []string{"group:integration-test"},
Destinations: []string{"group:integration-test"},
Users: []string{"ssh-it-user"},
},
},
},
),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
}),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
nsOneClients, err := scenario.ListTailscaleClients("namespace1")
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
nsTwoClients, err := scenario.ListTailscaleClients("namespace2")
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
testInterNamespaceSSH := func(sourceClients []TailscaleClient, targetClients []TailscaleClient) {
for _, client := range sourceClients {
for _, peer := range targetClients {
assertSSHHostname(t, client, peer)
}
}
}
testInterNamespaceSSH(nsOneClients, nsTwoClients)
testInterNamespaceSSH(nsTwoClients, nsOneClients)
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestSSHNoSSHConfigured(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions) - 5,
}
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{tsic.WithSSH()},
hsic.WithACLPolicy(
&headscale.ACLPolicy{
Groups: map[string][]string{
"group:integration-test": {"namespace1"},
},
ACLs: []headscale.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
SSHs: []headscale.SSH{},
},
),
hsic.WithTestName("sshnoneconfigured"),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
}),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
for _, client := range allClients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHPermissionDenied(t, client, peer)
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestSSHIsBlockedInACL(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespace1": len(TailscaleVersions) - 5,
}
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{tsic.WithSSH()},
hsic.WithACLPolicy(
&headscale.ACLPolicy{
Groups: map[string][]string{
"group:integration-test": {"namespace1"},
},
ACLs: []headscale.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:80"},
},
},
SSHs: []headscale.SSH{
{
Action: "accept",
Sources: []string{"group:integration-test"},
Destinations: []string{"group:integration-test"},
Users: []string{"ssh-it-user"},
},
},
},
),
hsic.WithTestName("sshisblockedinacl"),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
}),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
for _, client := range allClients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHTimeout(t, client, peer)
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func TestSSHNamespaceOnlyIsolation(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}
spec := map[string]int{
"namespaceacl1": len(TailscaleVersions) - 5,
"namespaceacl2": len(TailscaleVersions) - 5,
}
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{tsic.WithSSH()},
hsic.WithACLPolicy(
&headscale.ACLPolicy{
Groups: map[string][]string{
"group:ssh1": {"namespaceacl1"},
"group:ssh2": {"namespaceacl2"},
},
ACLs: []headscale.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
SSHs: []headscale.SSH{
{
Action: "accept",
Sources: []string{"group:ssh1"},
Destinations: []string{"group:ssh1"},
Users: []string{"ssh-it-user"},
},
{
Action: "accept",
Sources: []string{"group:ssh2"},
Destinations: []string{"group:ssh2"},
Users: []string{"ssh-it-user"},
},
},
},
),
hsic.WithTestName("sshtwonamespaceaclblock"),
hsic.WithConfigEnv(map[string]string{
"HEADSCALE_EXPERIMENTAL_FEATURE_SSH": "1",
}),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}
ssh1Clients, err := scenario.ListTailscaleClients("namespaceacl1")
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
ssh2Clients, err := scenario.ListTailscaleClients("namespaceacl2")
if err != nil {
t.Errorf("failed to get clients: %s", err)
}
err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}
_, err = scenario.ListTailscaleClientsFQDNs()
if err != nil {
t.Errorf("failed to get FQDNs: %s", err)
}
// TODO(kradalby,evenh): ACLs do not currently cover reject
// cases properly, and currently will accept all incoming connections
// as long as a rule is present.
//
// for _, client := range ssh1Clients {
// for _, peer := range ssh2Clients {
// if client.Hostname() == peer.Hostname() {
// continue
// }
//
// assertSSHPermissionDenied(t, client, peer)
// }
// }
//
// for _, client := range ssh2Clients {
// for _, peer := range ssh1Clients {
// if client.Hostname() == peer.Hostname() {
// continue
// }
//
// assertSSHPermissionDenied(t, client, peer)
// }
// }
for _, client := range ssh1Clients {
for _, peer := range ssh1Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
for _, client := range ssh2Clients {
for _, peer := range ssh2Clients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHHostname(t, client, peer)
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}
func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
t.Helper()
peerFQDN, _ := peer.FQDN()
command := []string{
"ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
fmt.Sprintf("%s@%s", "ssh-it-user", peerFQDN),
"'hostname'",
}
return retry(10, 1*time.Second, func() (string, string, error) {
return client.Execute(command)
})
}
func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, _, err := doSSH(t, client, peer)
assert.NoError(t, err)
assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
}
func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, stderr, err := doSSH(t, client, peer)
assert.Error(t, err)
assert.Empty(t, result)
assert.Contains(t, stderr, "Permission denied (tailscale)")
}
func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, stderr, err := doSSH(t, client, peer)
assert.NoError(t, err)
assert.Empty(t, result)
assert.Contains(t, stderr, "Connection timed out")
}

View File

@@ -1,25 +0,0 @@
package integration
import (
"net/netip"
"net/url"
"tailscale.com/ipn/ipnstate"
)
//nolint
type TailscaleClient interface {
Hostname() string
Shutdown() error
Version() string
Execute(command []string) (string, string, error)
Up(loginServer, authKey string) error
UpWithLoginURL(loginServer string) (*url.URL, error)
IPs() ([]netip.Addr, error)
FQDN() (string, error)
Status() (*ipnstate.Status, error)
WaitForReady() error
WaitForPeers(expected int) error
Ping(hostnameOrIP string) error
ID() string
}
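// pingAll is an illustrative sketch, not used by the tests, of how this interface is
// consumed: every client pings every tailnet address. The inputs would normally come
// from the Scenario helpers ListTailscaleClients and ListTailscaleClientsIPs.
func pingAll(clients []TailscaleClient, addrs []netip.Addr) error {
	for _, client := range clients {
		for _, addr := range addrs {
			// Ping retries internally until the peer answers or the
			// implementation gives up.
			if err := client.Ping(addr.String()); err != nil {
				return err
			}
		}
	}

	return nil
}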

View File

@@ -1,445 +0,0 @@
package tsic
import (
"encoding/json"
"errors"
"fmt"
"log"
"net/netip"
"net/url"
"strings"
"github.com/cenkalti/backoff/v4"
"github.com/juanfont/headscale"
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/integrationutil"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"tailscale.com/ipn/ipnstate"
)
const (
tsicHashLength = 6
dockerContextPath = "../."
headscaleCertPath = "/usr/local/share/ca-certificates/headscale.crt"
)
var (
errTailscalePingFailed = errors.New("ping failed")
errTailscaleNotLoggedIn = errors.New("tailscale not logged in")
errTailscaleWrongPeerCount = errors.New("wrong peer count")
errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
errTailscaleNotConnected = errors.New("tailscale not connected")
)
type TailscaleInContainer struct {
version string
hostname string
pool *dockertest.Pool
container *dockertest.Resource
network *dockertest.Network
// "cache"
ips []netip.Addr
fqdn string
// optional config
headscaleCert []byte
headscaleHostname string
withSSH bool
}
type Option = func(c *TailscaleInContainer)
func WithHeadscaleTLS(cert []byte) Option {
return func(tsic *TailscaleInContainer) {
tsic.headscaleCert = cert
}
}
func WithOrCreateNetwork(network *dockertest.Network) Option {
return func(tsic *TailscaleInContainer) {
if network != nil {
tsic.network = network
return
}
network, err := dockertestutil.GetFirstOrCreateNetwork(
tsic.pool,
fmt.Sprintf("%s-network", tsic.hostname),
)
if err != nil {
log.Fatalf("failed to create network: %s", err)
}
tsic.network = network
}
}
func WithHeadscaleName(hsName string) Option {
return func(tsic *TailscaleInContainer) {
tsic.headscaleHostname = hsName
}
}
func WithSSH() Option {
return func(tsic *TailscaleInContainer) {
tsic.withSSH = true
}
}
func New(
pool *dockertest.Pool,
version string,
network *dockertest.Network,
opts ...Option,
) (*TailscaleInContainer, error) {
hash, err := headscale.GenerateRandomStringDNSSafe(tsicHashLength)
if err != nil {
return nil, err
}
hostname := fmt.Sprintf("ts-%s-%s", strings.ReplaceAll(version, ".", "-"), hash)
tsic := &TailscaleInContainer{
version: version,
hostname: hostname,
pool: pool,
network: network,
}
for _, opt := range opts {
opt(tsic)
}
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{network},
// Cmd: []string{
// "tailscaled", "--tun=tsdev",
// },
Entrypoint: []string{
"/bin/bash",
"-c",
"/bin/sleep 3 ; update-ca-certificates ; tailscaled --tun=tsdev",
},
}
if tsic.headscaleHostname != "" {
tailscaleOptions.ExtraHosts = []string{
"host.docker.internal:host-gateway",
fmt.Sprintf("%s:host-gateway", tsic.headscaleHostname),
}
}
// dockertest isn't very good at handling containers that have already
// been created; this is an attempt to make sure this container isn't
// present.
err = pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}
container, err := pool.BuildAndRunWithBuildOptions(
createTailscaleBuildOptions(version),
tailscaleOptions,
dockertestutil.DockerRestartPolicy,
dockertestutil.DockerAllowLocalIPv6,
dockertestutil.DockerAllowNetworkAdministration,
)
if err != nil {
return nil, fmt.Errorf("could not start tailscale container: %w", err)
}
log.Printf("Created %s container\n", hostname)
tsic.container = container
if tsic.hasTLS() {
err = tsic.WriteFile(headscaleCertPath, tsic.headscaleCert)
if err != nil {
return nil, fmt.Errorf("failed to write TLS certificate to container: %w", err)
}
}
return tsic, nil
}
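// joinForIllustration is a sketch, not called by the tests, of how a client container
// is typically created and joined to headscale: loginServer and authKey would normally
// come from the headscale container's GetEndpoint and a generated pre-auth key.
// The version "1.30.2" is one of the entries in TailscaleVersions.
func joinForIllustration(
	pool *dockertest.Pool,
	network *dockertest.Network,
	headscaleCert []byte,
	headscaleHostname, loginServer, authKey string,
) (*TailscaleInContainer, error) {
	tsClient, err := New(
		pool,
		"1.30.2",
		network,
		WithHeadscaleTLS(headscaleCert),
		WithHeadscaleName(headscaleHostname),
	)
	if err != nil {
		return nil, err
	}

	// Log the node in and wait until tailscaled reports a connected tailnet.
	if err := tsClient.Up(loginServer, authKey); err != nil {
		return nil, err
	}
	if err := tsClient.WaitForReady(); err != nil {
		return nil, err
	}

	return tsClient, nil
}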
func (t *TailscaleInContainer) hasTLS() bool {
return len(t.headscaleCert) != 0
}
func (t *TailscaleInContainer) Shutdown() error {
return t.pool.Purge(t.container)
}
func (t *TailscaleInContainer) Hostname() string {
return t.hostname
}
func (t *TailscaleInContainer) Version() string {
return t.version
}
func (t *TailscaleInContainer) ID() string {
return t.container.Container.ID
}
func (t *TailscaleInContainer) Execute(
command []string,
) (string, string, error) {
stdout, stderr, err := dockertestutil.ExecuteCommand(
t.container,
command,
[]string{},
)
if err != nil {
log.Printf("command stderr: %s\n", stderr)
if stdout != "" {
log.Printf("command stdout: %s\n", stdout)
}
if strings.Contains(stderr, "NeedsLogin") {
return stdout, stderr, errTailscaleNotLoggedIn
}
return stdout, stderr, err
}
return stdout, stderr, nil
}
func (t *TailscaleInContainer) Up(
loginServer, authKey string,
) error {
command := []string{
"tailscale",
"up",
"-login-server",
loginServer,
"--authkey",
authKey,
"--hostname",
t.hostname,
}
if t.withSSH {
command = append(command, "--ssh")
}
if _, _, err := t.Execute(command); err != nil {
return fmt.Errorf("failed to join tailscale client: %w", err)
}
return nil
}
func (t *TailscaleInContainer) UpWithLoginURL(
loginServer string,
) (*url.URL, error) {
command := []string{
"tailscale",
"up",
"-login-server",
loginServer,
"--hostname",
t.hostname,
}
_, stderr, err := t.Execute(command)
if errors.Is(err, errTailscaleNotLoggedIn) {
return nil, errTailscaleCannotUpWithoutAuthkey
}
urlStr := strings.ReplaceAll(stderr, "\nTo authenticate, visit:\n\n\t", "")
urlStr = strings.TrimSpace(urlStr)
// parse URL
loginURL, err := url.Parse(urlStr)
if err != nil {
log.Printf("Could not parse login URL: %s", err)
log.Printf("Original join command result: %s", stderr)
return nil, err
}
return loginURL, nil
}
func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
if t.ips != nil && len(t.ips) != 0 {
return t.ips, nil
}
ips := make([]netip.Addr, 0)
command := []string{
"tailscale",
"ip",
}
result, _, err := t.Execute(command)
if err != nil {
return []netip.Addr{}, fmt.Errorf("failed to join tailscale client: %w", err)
}
for _, address := range strings.Split(result, "\n") {
address = strings.TrimSuffix(address, "\n")
if len(address) < 1 {
continue
}
ip, err := netip.ParseAddr(address)
if err != nil {
return nil, err
}
ips = append(ips, ip)
}
return ips, nil
}
func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
command := []string{
"tailscale",
"status",
"--json",
}
result, _, err := t.Execute(command)
if err != nil {
return nil, fmt.Errorf("failed to execute tailscale status command: %w", err)
}
var status ipnstate.Status
err = json.Unmarshal([]byte(result), &status)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale status: %w", err)
}
return &status, err
}
func (t *TailscaleInContainer) FQDN() (string, error) {
if t.fqdn != "" {
return t.fqdn, nil
}
status, err := t.Status()
if err != nil {
return "", fmt.Errorf("failed to get FQDN: %w", err)
}
return status.Self.DNSName, nil
}
func (t *TailscaleInContainer) WaitForReady() error {
return t.pool.Retry(func() error {
status, err := t.Status()
if err != nil {
return fmt.Errorf("failed to fetch tailscale status: %w", err)
}
if status.CurrentTailnet != nil {
return nil
}
return errTailscaleNotConnected
})
}
func (t *TailscaleInContainer) WaitForPeers(expected int) error {
return t.pool.Retry(func() error {
status, err := t.Status()
if err != nil {
return fmt.Errorf("failed to fetch tailscale status: %w", err)
}
if peers := status.Peers(); len(peers) != expected {
return errTailscaleWrongPeerCount
}
return nil
})
}
// TODO(kradalby): Make this multi-ping with goroutine magic.
func (t *TailscaleInContainer) Ping(hostnameOrIP string) error {
return t.pool.Retry(func() error {
command := []string{
"tailscale", "ping",
"--timeout=1s",
"--c=10",
"--until-direct=true",
hostnameOrIP,
}
result, _, err := t.Execute(command)
if err != nil {
log.Printf(
"failed to run ping command from %s to %s, err: %s",
t.Hostname(),
hostnameOrIP,
err,
)
return err
}
if !strings.Contains(result, "pong") && !strings.Contains(result, "is local") {
return backoff.Permanent(errTailscalePingFailed)
}
return nil
})
}
func (t *TailscaleInContainer) WriteFile(path string, data []byte) error {
return integrationutil.WriteFileToContainer(t.pool, t.container, path, data)
}
func createTailscaleBuildOptions(version string) *dockertest.BuildOptions {
var tailscaleBuildOptions *dockertest.BuildOptions
switch version {
case "head":
tailscaleBuildOptions = &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale-HEAD",
ContextDir: dockerContextPath,
BuildArgs: []docker.BuildArg{},
}
case "unstable":
tailscaleBuildOptions = &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: dockerContextPath,
BuildArgs: []docker.BuildArg{
{
Name: "TAILSCALE_VERSION",
Value: "*", // Installs the latest version https://askubuntu.com/a/824926
},
{
Name: "TAILSCALE_CHANNEL",
Value: "unstable",
},
},
}
default:
tailscaleBuildOptions = &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: dockerContextPath,
BuildArgs: []docker.BuildArg{
{
Name: "TAILSCALE_VERSION",
Value: version,
},
{
Name: "TAILSCALE_CHANNEL",
Value: "stable",
},
},
}
}
return tailscaleBuildOptions
}

View File

@@ -1,4 +1,5 @@
// nolint
//go:build integration_cli
package headscale
import (
@@ -12,7 +13,6 @@ import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
)
@@ -27,11 +27,7 @@ type IntegrationCLITestSuite struct {
env []string
}
func TestIntegrationCLITestSuite(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration tests due to short flag")
}
func TestCLIIntegrationTestSuite(t *testing.T) {
s := new(IntegrationCLITestSuite)
suite.Run(t, s)
@@ -46,11 +42,11 @@ func (s *IntegrationCLITestSuite) SetupTest() {
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
}
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
if err != nil {
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
s.network = *pnetwork
} else {
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
}
s.network = network
headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile",
@@ -67,12 +63,8 @@ func (s *IntegrationCLITestSuite) SetupTest() {
Mounts: []string{
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
},
Cmd: []string{"headscale", "serve"},
Networks: []*dockertest.Network{&s.network},
ExposedPorts: []string{"8080/tcp"},
PortBindings: map[docker.Port][]docker.PortBinding{
"8080/tcp": {{HostPort: "8080"}},
},
Networks: []*dockertest.Network{&s.network},
Cmd: []string{"headscale", "serve"},
}
err = s.pool.RemoveContainerByName(headscaleHostname)
@@ -95,9 +87,7 @@ func (s *IntegrationCLITestSuite) SetupTest() {
fmt.Println("Created headscale container for CLI tests")
fmt.Println("Waiting for headscale to be ready for CLI tests")
hostEndpoint := fmt.Sprintf("%s:%s",
s.headscale.GetIPInNetwork(&s.network),
s.headscale.GetPort("8080/tcp"))
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))
if err := s.pool.Retry(func() error {
url := fmt.Sprintf("http://%s/health", hostEndpoint)
@@ -139,7 +129,7 @@ func (s *IntegrationCLITestSuite) HandleStats(
}
func (s *IntegrationCLITestSuite) createNamespace(name string) (*v1.Namespace, error) {
result, _, err := ExecuteCommand(
result, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -182,7 +172,7 @@ func (s *IntegrationCLITestSuite) TestNamespaceCommand() {
assert.Equal(s.T(), names[2], namespaces[2].Name)
// Test list namespaces
listResult, _, err := ExecuteCommand(
listResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -204,7 +194,7 @@ func (s *IntegrationCLITestSuite) TestNamespaceCommand() {
assert.Equal(s.T(), names[2], listedNamespaces[2].Name)
// Test rename namespace
renameResult, _, err := ExecuteCommand(
renameResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -226,7 +216,7 @@ func (s *IntegrationCLITestSuite) TestNamespaceCommand() {
assert.Equal(s.T(), renamedNamespace.Name, "newname")
// Test list after rename namespaces
listAfterRenameResult, _, err := ExecuteCommand(
listAfterRenameResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -257,7 +247,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
assert.Nil(s.T(), err)
for i := 0; i < count; i++ {
preAuthResult, _, err := ExecuteCommand(
preAuthResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -270,8 +260,6 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
"24h",
"--output",
"json",
"--tags",
"tag:test1,tag:test2",
},
[]string{},
)
@@ -287,7 +275,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
assert.Len(s.T(), keys, 5)
// Test list of keys
listResult, _, err := ExecuteCommand(
listResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -345,14 +333,9 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
listedPreAuthKeys[4].Expiration.AsTime().Before(time.Now().Add(time.Hour*26)),
)
// Test that tags are present
for i := 0; i < count; i++ {
assert.Equal(s.T(), listedPreAuthKeys[i].AclTags, []string{"tag:test1", "tag:test2"})
}
// Expire three keys
for i := 0; i < 3; i++ {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -368,7 +351,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommand() {
}
// Test list pre auth keys after expire
listAfterExpireResult, _, err := ExecuteCommand(
listAfterExpireResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -413,7 +396,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandWithoutExpiry() {
namespace, err := s.createNamespace("pre-auth-key-without-exp-namespace")
assert.Nil(s.T(), err)
preAuthResult, _, err := ExecuteCommand(
preAuthResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -434,7 +417,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandWithoutExpiry() {
assert.Nil(s.T(), err)
// Test list of keys
listResult, _, err := ExecuteCommand(
listResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -466,7 +449,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandReusableEphemeral() {
namespace, err := s.createNamespace("pre-auth-key-reus-ephm-namespace")
assert.Nil(s.T(), err)
preAuthReusableResult, _, err := ExecuteCommand(
preAuthReusableResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -489,7 +472,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandReusableEphemeral() {
assert.True(s.T(), preAuthReusableKey.GetReusable())
assert.False(s.T(), preAuthReusableKey.GetEphemeral())
preAuthEphemeralResult, _, err := ExecuteCommand(
preAuthEphemeralResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -531,7 +514,7 @@ func (s *IntegrationCLITestSuite) TestPreAuthKeyCommandReusableEphemeral() {
// assert.NotNil(s.T(), err)
// Test list of keys
listResult, _, err := ExecuteCommand(
listResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -558,14 +541,14 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
assert.Nil(s.T(), err)
machineKeys := []string{
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(s.T(), err)
for index, machineKey := range machineKeys {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -584,7 +567,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -609,7 +592,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
}
assert.Len(s.T(), machines, len(machineKeys))
addTagResult, _, err := ExecuteCommand(
addTagResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -629,7 +612,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
assert.Equal(s.T(), []string{"tag:test"}, machine.ForcedTags)
// try to set a wrong tag and retrieve the error
wrongTagResult, _, err := ExecuteCommand(
wrongTagResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -651,7 +634,7 @@ func (s *IntegrationCLITestSuite) TestNodeTagCommand() {
assert.Contains(s.T(), errorOutput.Error, "tag must start with the string 'tag:'")
// Test list all nodes after added seconds
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -691,17 +674,17 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
// Randomly generated machine keys
machineKeys := []string{
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(s.T(), err)
for index, machineKey := range machineKeys {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -720,7 +703,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -747,7 +730,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
assert.Len(s.T(), machines, len(machineKeys))
// Test list all nodes after added seconds
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -779,14 +762,14 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
assert.Equal(s.T(), "machine-5", listAll[4].Name)
otherNamespaceMachineKeys := []string{
"nodekey:b5b444774186d4217adcec407563a1223929465ee2c68a4da13af0d0185b4f8e",
"nodekey:dc721977ac7415aafa87f7d4574cbe07c6b171834a6d37375782bdc1fb6b3584",
"b5b444774186d4217adcec407563a1223929465ee2c68a4da13af0d0185b4f8e",
"dc721977ac7415aafa87f7d4574cbe07c6b171834a6d37375782bdc1fb6b3584",
}
otherNamespaceMachines := make([]*v1.Machine, len(otherNamespaceMachineKeys))
assert.Nil(s.T(), err)
for index, machineKey := range otherNamespaceMachineKeys {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -805,7 +788,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -832,7 +815,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
assert.Len(s.T(), otherNamespaceMachines, len(otherNamespaceMachineKeys))
// Test list all nodes after added otherNamespace
listAllWithotherNamespaceResult, _, err := ExecuteCommand(
listAllWithotherNamespaceResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -862,7 +845,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
assert.Equal(s.T(), "otherNamespace-machine-2", listAllWithotherNamespace[6].Name)
// Test list all nodes after added otherNamespace
listOnlyotherNamespaceMachineNamespaceResult, _, err := ExecuteCommand(
listOnlyotherNamespaceMachineNamespaceResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -901,7 +884,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
)
// Delete a machine
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -919,7 +902,7 @@ func (s *IntegrationCLITestSuite) TestNodeCommand() {
assert.Nil(s.T(), err)
// Test: list main namespace after machine is deleted
listOnlyMachineNamespaceAfterDeleteResult, _, err := ExecuteCommand(
listOnlyMachineNamespaceAfterDeleteResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -950,17 +933,17 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
// Randomly generated machine keys
machineKeys := []string{
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(s.T(), err)
for index, machineKey := range machineKeys {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -979,7 +962,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1005,7 +988,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
assert.Len(s.T(), machines, len(machineKeys))
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1031,7 +1014,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
assert.True(s.T(), listAll[4].Expiry.AsTime().IsZero())
for i := 0; i < 3; i++ {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1045,7 +1028,7 @@ func (s *IntegrationCLITestSuite) TestNodeExpireCommand() {
assert.Nil(s.T(), err)
}
listAllAfterExpiryResult, _, err := ExecuteCommand(
listAllAfterExpiryResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1077,17 +1060,17 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
// Randomly generated machine keys
machineKeys := []string{
"nodekey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
"nodekey:8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"nodekey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"nodekey:6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
"cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
"8bc13285cee598acf76b1824a6f4490f7f2e3751b201e28aeb3b07fe81d5b4a1",
"f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
"6abd00bb5fdda622db51387088c68e97e71ce58e7056aa54f592b6a8219d524c",
"9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
}
machines := make([]*v1.Machine, len(machineKeys))
assert.Nil(s.T(), err)
for index, machineKey := range machineKeys {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1106,7 +1089,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1132,7 +1115,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
assert.Len(s.T(), machines, len(machineKeys))
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1158,7 +1141,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
assert.Contains(s.T(), listAll[4].GetGivenName(), "machine-5")
for i := 0; i < 3; i++ {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1173,7 +1156,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
assert.Nil(s.T(), err)
}
listAllAfterRenameResult, _, err := ExecuteCommand(
listAllAfterRenameResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1199,7 +1182,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
assert.Contains(s.T(), listAllAfterRename[4].GetGivenName(), "machine-5")
// Test failure for too long names
result, _, err := ExecuteCommand(
result, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1214,7 +1197,7 @@ func (s *IntegrationCLITestSuite) TestNodeRenameCommand() {
assert.Nil(s.T(), err)
assert.Contains(s.T(), result, "not be over 63 chars")
listAllAfterRenameAttemptResult, _, err := ExecuteCommand(
listAllAfterRenameAttemptResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1248,9 +1231,9 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
assert.Nil(s.T(), err)
// Randomly generated machine keys
machineKey := "nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe"
machineKey := "9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe"
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1273,7 +1256,7 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1297,7 +1280,7 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
assert.Equal(s.T(), uint64(1), machine.Id)
assert.Equal(s.T(), "route-machine", machine.Name)
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1322,7 +1305,7 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
assert.Empty(s.T(), listAll.EnabledRoutes)
enableTwoRoutesResult, _, err := ExecuteCommand(
enableTwoRoutesResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1354,7 +1337,7 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
assert.Contains(s.T(), enableTwoRoutes.EnabledRoutes, "192.168.1.0/24")
// Enable only one route, effectively disabling one of the routes
enableOneRouteResult, _, err := ExecuteCommand(
enableOneRouteResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1383,7 +1366,7 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
assert.Contains(s.T(), enableOneRoute.EnabledRoutes, "10.0.0.0/8")
// Enable only one route, effectively disabling one of the routes
failEnableNonAdvertisedRoute, _, err := ExecuteCommand(
failEnableNonAdvertisedRoute, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1407,7 +1390,7 @@ func (s *IntegrationCLITestSuite) TestRouteCommand() {
)
// Enable all routes on host
enableAllRouteResult, _, err := ExecuteCommand(
enableAllRouteResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1442,7 +1425,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
keys := make([]string, count)
for i := 0; i < count; i++ {
apiResult, _, err := ExecuteCommand(
apiResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1468,7 +1451,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
assert.Len(s.T(), keys, 5)
// Test list of keys
listResult, _, err := ExecuteCommand(
listResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1530,7 +1513,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
// Expire three keys
for i := 0; i < 3; i++ {
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1547,7 +1530,7 @@ func (s *IntegrationCLITestSuite) TestApiKeyCommand() {
}
// Test list pre auth keys after expire
listAfterExpireResult, _, err := ExecuteCommand(
listAfterExpireResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1588,9 +1571,9 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
assert.Nil(s.T(), err)
// Randomly generated machine key
machineKey := "nodekey:688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa"
machineKey := "688411b767663479632d44140f08a9fde87383adc7cdeb518f62ce28a17ef0aa"
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1609,7 +1592,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
)
assert.Nil(s.T(), err)
machineResult, _, err := ExecuteCommand(
machineResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1636,7 +1619,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
machineId := fmt.Sprintf("%d", machine.Id)
moveToNewNSResult, _, err := ExecuteCommand(
moveToNewNSResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1658,7 +1641,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
assert.Equal(s.T(), machine.Namespace, newNamespace)
listAllNodesResult, _, err := ExecuteCommand(
listAllNodesResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1681,7 +1664,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
assert.Equal(s.T(), allNodes[0].Namespace, machine.Namespace)
assert.Equal(s.T(), allNodes[0].Namespace, newNamespace)
moveToNonExistingNSResult, _, err := ExecuteCommand(
moveToNonExistingNSResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1705,7 +1688,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
)
assert.Equal(s.T(), machine.Namespace, newNamespace)
moveToOldNSResult, _, err := ExecuteCommand(
moveToOldNSResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1727,7 +1710,7 @@ func (s *IntegrationCLITestSuite) TestNodeMoveCommand() {
assert.Equal(s.T(), machine.Namespace, oldNamespace)
moveToSameNSResult, _, err := ExecuteCommand(
moveToSameNSResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1759,7 +1742,7 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
altEnvConfig, err := os.ReadFile("integration_test/etc/alt-env-config.dump.gold.yaml")
assert.Nil(s.T(), err)
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1774,7 +1757,7 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
assert.YAMLEq(s.T(), string(defaultConfig), string(defaultDumpConfig))
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1791,7 +1774,7 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
assert.YAMLEq(s.T(), string(altConfig), string(altDumpConfig))
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -1808,7 +1791,7 @@ func (s *IntegrationCLITestSuite) TestLoadConfigFromCommand() {
assert.YAMLEq(s.T(), string(altEnvConfig), string(altEnvDumpConfig))
_, _, err = ExecuteCommand(
_, err = ExecuteCommand(
&s.headscale,
[]string{
"headscale",

View File

@@ -1,4 +1,5 @@
//nolint
//go:build integration
package headscale
import (
@@ -18,8 +19,7 @@ import (
)
const (
headscaleNetwork = "headscale-test"
headscaleHostname = "headscale"
headscaleHostname = "headscale-derp"
DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
)
@@ -30,10 +30,9 @@ var (
IpPrefix6 = netip.MustParsePrefix("fd7a:115c:a1e0::/48")
tailscaleVersions = []string{
"head",
"unstable",
"1.32.0",
"1.30.2",
// "head",
// "unstable",
"1.30.0",
"1.28.0",
"1.26.2",
"1.24.2",
@@ -69,7 +68,7 @@ func ExecuteCommand(
cmd []string,
env []string,
options ...ExecuteCommandOption,
) (string, string, error) {
) (string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
@@ -79,7 +78,7 @@ func ExecuteCommand(
for _, opt := range options {
if err := opt(&execConfig); err != nil {
return "", "", fmt.Errorf("execute-command/options: %w", err)
return "", fmt.Errorf("execute-command/options: %w", err)
}
}
@@ -108,7 +107,7 @@ func ExecuteCommand(
select {
case res := <-resultChan:
if res.err != nil {
return stdout.String(), stderr.String(), res.err
return "", res.err
}
if res.exitCode != 0 {
@@ -116,19 +115,13 @@ func ExecuteCommand(
fmt.Println("stdout: ", stdout.String())
fmt.Println("stderr: ", stderr.String())
return stdout.String(), stderr.String(), fmt.Errorf(
"command failed with: %s",
stderr.String(),
)
return "", fmt.Errorf("command failed with: %s", stderr.String())
}
return stdout.String(), stderr.String(), nil
return stdout.String(), nil
case <-time.After(execConfig.timeout):
return stdout.String(), stderr.String(), fmt.Errorf(
"command timed out after %s",
execConfig.timeout,
)
return "", fmt.Errorf("command timed out after %s", execConfig.timeout)
}
}
@@ -207,7 +200,7 @@ func getIPs(
for hostname, tailscale := range tailscales {
command := []string{"tailscale", "ip"}
result, _, err := ExecuteCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -235,7 +228,7 @@ func getIPs(
func getDNSNames(
headscale *dockertest.Resource,
) ([]string, error) {
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
headscale,
[]string{
"headscale",
@@ -268,7 +261,7 @@ func getDNSNames(
func getMagicFQDN(
headscale *dockertest.Resource,
) ([]string, error) {
listAllResult, _, err := ExecuteCommand(
listAllResult, err := ExecuteCommand(
headscale,
[]string{
"headscale",
@@ -323,21 +316,3 @@ func GetEnvBool(key string) (bool, error) {
return v, nil
}
func GetFirstOrCreateNetwork(pool *dockertest.Pool, name string) (dockertest.Network, error) {
networks, err := pool.NetworksByName(name)
if err != nil || len(networks) == 0 {
if _, err := pool.CreateNetwork(name); err == nil {
// Create does not give us an updated version of the resource, so we need to
// get it again.
networks, err := pool.NetworksByName(name)
if err != nil {
return dockertest.Network{}, err
}
return networks[0], nil
}
}
return networks[0], nil
}

View File

@@ -1,4 +1,5 @@
//nolint
//go:build integration_derp
package headscale
import (
@@ -16,39 +17,34 @@ import (
"testing"
"time"
"github.com/ccding/go-stun/stun"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/ccding/go-stun/stun"
)
const (
headscaleDerpHostname = "headscale-derp"
namespaceName = "derpnamespace"
totalContainers = 3
namespaceName = "derpnamespace"
totalContainers = 3
)
type IntegrationDERPTestSuite struct {
suite.Suite
stats *suite.SuiteInformation
pool dockertest.Pool
network dockertest.Network
containerNetworks map[int]dockertest.Network // so we keep the containers isolated
headscale dockertest.Resource
saveLogs bool
pool dockertest.Pool
networks map[int]dockertest.Network // so we keep the containers isolated
headscale dockertest.Resource
saveLogs bool
tailscales map[string]dockertest.Resource
joinWaitGroup sync.WaitGroup
}
func TestIntegrationDERPTestSuite(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration tests due to short flag")
}
func TestDERPIntegrationTestSuite(t *testing.T) {
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
if err != nil {
saveLogs = false
@@ -57,7 +53,7 @@ func TestIntegrationDERPTestSuite(t *testing.T) {
s := new(IntegrationDERPTestSuite)
s.tailscales = make(map[string]dockertest.Resource)
s.containerNetworks = make(map[int]dockertest.Network)
s.networks = make(map[int]dockertest.Network)
s.saveLogs = saveLogs
suite.Run(t, s)
@@ -82,7 +78,7 @@ func TestIntegrationDERPTestSuite(t *testing.T) {
log.Printf("Could not purge resource: %s\n", err)
}
for _, network := range s.containerNetworks {
for _, network := range s.networks {
if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
@@ -97,15 +93,9 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
}
network, err := GetFirstOrCreateNetwork(&s.pool, headscaleNetwork)
if err != nil {
s.FailNow(fmt.Sprintf("Failed to create or get network: %s", err), "")
}
s.network = network
for i := 0; i < totalContainers; i++ {
if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
s.containerNetworks[i] = *pnetwork
s.networks[i] = *pnetwork
} else {
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
}
@@ -122,7 +112,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
}
headscaleOptions := &dockertest.RunOptions{
Name: headscaleDerpHostname,
Name: headscaleHostname,
Mounts: []string{
fmt.Sprintf(
"%s/integration_test/etc_embedded_derp:/etc/headscale",
@@ -130,7 +120,6 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
),
},
Cmd: []string{"headscale", "serve"},
Networks: []*dockertest.Network{&s.network},
ExposedPorts: []string{"8443/tcp", "3478/udp"},
PortBindings: map[docker.Port][]docker.PortBinding{
"8443/tcp": {{HostPort: "8443"}},
@@ -138,7 +127,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
},
}
err = s.pool.RemoveContainerByName(headscaleDerpHostname)
err = s.pool.RemoveContainerByName(headscaleHostname)
if err != nil {
s.FailNow(
fmt.Sprintf(
@@ -164,15 +153,13 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
hostname, container := s.tailscaleContainer(
fmt.Sprint(i),
version,
s.containerNetworks[i],
s.networks[i],
)
s.tailscales[hostname] = *container
}
log.Println("Waiting for headscale to be ready for embedded DERP tests")
hostEndpoint := fmt.Sprintf("%s:%s",
s.headscale.GetIPInNetwork(&s.network),
s.headscale.GetPort("8443/tcp"))
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8443/tcp"))
if err := s.pool.Retry(func() error {
url := fmt.Sprintf("https://%s/health", hostEndpoint)
@@ -200,7 +187,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
log.Println("headscale container is ready for embedded DERP tests")
log.Printf("Creating headscale namespace: %s\n", namespaceName)
result, _, err := ExecuteCommand(
result, err := ExecuteCommand(
&s.headscale,
[]string{"headscale", "namespaces", "create", namespaceName},
[]string{},
@@ -209,7 +196,7 @@ func (s *IntegrationDERPTestSuite) SetupSuite() {
assert.Nil(s.T(), err)
log.Printf("Creating pre auth key for %s\n", namespaceName)
preAuthResult, _, err := ExecuteCommand(
preAuthResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
@@ -272,7 +259,7 @@ func (s *IntegrationDERPTestSuite) Join(
log.Println("Join command:", command)
log.Printf("Running join command for %s\n", hostname)
_, _, err := ExecuteCommand(
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -333,7 +320,7 @@ func (s *IntegrationDERPTestSuite) TearDownSuite() {
log.Printf("Could not purge resource: %s\n", err)
}
for _, network := range s.containerNetworks {
for _, network := range s.networks {
if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
@@ -427,7 +414,7 @@ func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
peername,
)
log.Println(command)
result, _, err := ExecuteCommand(
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
@@ -441,9 +428,7 @@ func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
}
func (s *IntegrationDERPTestSuite) TestDERPSTUN() {
headscaleSTUNAddr := fmt.Sprintf("%s:%s",
s.headscale.GetIPInNetwork(&s.network),
s.headscale.GetPort("3478/udp"))
headscaleSTUNAddr := fmt.Sprintf("localhost:%s", s.headscale.GetPort("3478/udp"))
client := stun.NewClient()
client.SetVerbose(true)
client.SetVVerbose(true)

787
integration_general_test.go Normal file
View File

@@ -0,0 +1,787 @@
//go:build integration_general
package headscale
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"net/netip"
"os"
"path"
"strings"
"sync"
"testing"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/ipn/ipnstate"
)
type IntegrationTestSuite struct {
suite.Suite
stats *suite.SuiteInformation
pool dockertest.Pool
network dockertest.Network
headscale dockertest.Resource
saveLogs bool
namespaces map[string]TestNamespace
joinWaitGroup sync.WaitGroup
}
func TestIntegrationTestSuite(t *testing.T) {
saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
if err != nil {
saveLogs = false
}
s := new(IntegrationTestSuite)
s.namespaces = map[string]TestNamespace{
"thisspace": {
count: 10,
tailscales: make(map[string]dockertest.Resource),
},
"otherspace": {
count: 2,
tailscales: make(map[string]dockertest.Resource),
},
}
s.saveLogs = saveLogs
suite.Run(t, s)
// HandleStats, which allows us to check if we passed and save logs,
// is called after TearDown, so we cannot tear down containers before
// we have potentially saved the logs.
if s.saveLogs {
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
if !s.stats.Passed() {
err := s.saveLog(&s.headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
if err := s.network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
}
func (s *IntegrationTestSuite) saveLog(
resource *dockertest.Resource,
basePath string,
) error {
err := os.MkdirAll(basePath, os.ModePerm)
if err != nil {
return err
}
var stdout bytes.Buffer
var stderr bytes.Buffer
err = s.pool.Client.Logs(
docker.LogsOptions{
Context: context.TODO(),
Container: resource.Container.ID,
OutputStream: &stdout,
ErrorStream: &stderr,
Tail: "all",
RawTerminal: false,
Stdout: true,
Stderr: true,
Follow: false,
Timestamps: false,
},
)
if err != nil {
return err
}
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
err = os.WriteFile(
path.Join(basePath, resource.Container.Name+".stdout.log"),
[]byte(stdout.String()),
0o644,
)
if err != nil {
return err
}
err = os.WriteFile(
path.Join(basePath, resource.Container.Name+".stderr.log"),
[]byte(stderr.String()),
0o644,
)
if err != nil {
return err
}
return nil
}
func (s *IntegrationTestSuite) Join(
endpoint, key, hostname string,
tailscale dockertest.Resource,
) {
defer s.joinWaitGroup.Done()
command := []string{
"tailscale",
"up",
"-login-server",
endpoint,
"--authkey",
key,
"--hostname",
hostname,
}
log.Println("Join command:", command)
log.Printf("Running join command for %s\n", hostname)
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(s.T(), err)
log.Printf("%s joined\n", hostname)
}
func (s *IntegrationTestSuite) tailscaleContainer(
namespace, identifier, version string,
) (string, *dockertest.Resource) {
tailscaleBuildOptions := getDockerBuildOptions(version)
hostname := fmt.Sprintf(
"%s-tailscale-%s-%s",
namespace,
strings.Replace(version, ".", "-", -1),
identifier,
)
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{&s.network},
Cmd: []string{
"tailscaled", "--tun=tsdev",
},
}
pts, err := s.pool.BuildAndRunWithBuildOptions(
tailscaleBuildOptions,
tailscaleOptions,
DockerRestartPolicy,
DockerAllowLocalIPv6,
DockerAllowNetworkAdministration,
)
if err != nil {
log.Fatalf("Could not start tailscale container version %s: %s", version, err)
}
log.Printf("Created %s container\n", hostname)
return hostname, pts
}
func (s *IntegrationTestSuite) SetupSuite() {
var err error
app = Headscale{
dbType: "sqlite3",
dbString: "integration_test_db.sqlite3",
}
if ppool, err := dockertest.NewPool(""); err == nil {
s.pool = *ppool
} else {
s.FailNow(fmt.Sprintf("Could not connect to docker: %s", err), "")
}
if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
s.network = *pnetwork
} else {
s.FailNow(fmt.Sprintf("Could not create network: %s", err), "")
}
headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile",
ContextDir: ".",
}
currentPath, err := os.Getwd()
if err != nil {
s.FailNow(fmt.Sprintf("Could not determine current path: %s", err), "")
}
headscaleOptions := &dockertest.RunOptions{
Name: "headscale",
Mounts: []string{
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
},
Networks: []*dockertest.Network{&s.network},
Cmd: []string{"headscale", "serve"},
}
err = s.pool.RemoveContainerByName(headscaleHostname)
if err != nil {
s.FailNow(
fmt.Sprintf(
"Could not remove existing container before building test: %s",
err,
),
"",
)
}
log.Println("Creating headscale container for core integration tests")
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
s.headscale = *pheadscale
} else {
s.FailNow(fmt.Sprintf("Could not start headscale container for core integration tests: %s", err), "")
}
log.Println("Created headscale container for core integration tests")
log.Println("Creating tailscale containers for core integration tests")
for namespace, scales := range s.namespaces {
for i := 0; i < scales.count; i++ {
version := tailscaleVersions[i%len(tailscaleVersions)]
hostname, container := s.tailscaleContainer(
namespace,
fmt.Sprint(i),
version,
)
scales.tailscales[hostname] = *container
}
}
log.Println("Waiting for headscale to be ready for core integration tests")
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))
if err := s.pool.Retry(func() error {
url := fmt.Sprintf("http://%s/health", hostEndpoint)
resp, err := http.Get(url)
if err != nil {
fmt.Printf("headscale for core integration test is not ready: %s\n", err)
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("status code not OK")
}
return nil
}); err != nil {
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
// test setup, we need to abort and tear down. However, testify does not seem to
// support that at the moment:
// https://github.com/stretchr/testify/issues/849
return // fmt.Errorf("Could not connect to headscale: %s", err)
}
log.Println("headscale container is ready for core integration tests")
for namespace, scales := range s.namespaces {
log.Printf("Creating headscale namespace: %s\n", namespace)
result, err := ExecuteCommand(
&s.headscale,
[]string{"headscale", "namespaces", "create", namespace},
[]string{},
)
log.Println("headscale create namespace result: ", result)
assert.Nil(s.T(), err)
log.Printf("Creating pre auth key for %s\n", namespace)
preAuthResult, err := ExecuteCommand(
&s.headscale,
[]string{
"headscale",
"--namespace",
namespace,
"preauthkeys",
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
},
[]string{"LOG_LEVEL=error"},
)
assert.Nil(s.T(), err)
var preAuthKey v1.PreAuthKey
err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
assert.Nil(s.T(), err)
assert.True(s.T(), preAuthKey.Reusable)
headscaleEndpoint := "http://headscale:8080"
log.Printf(
"Joining tailscale containers to headscale at %s\n",
headscaleEndpoint,
)
for hostname, tailscale := range scales.tailscales {
s.joinWaitGroup.Add(1)
go s.Join(headscaleEndpoint, preAuthKey.Key, hostname, tailscale)
}
s.joinWaitGroup.Wait()
}
// The nodes need a bit of time to get their updated maps from headscale
// TODO: See if we can have a more deterministic wait here.
time.Sleep(60 * time.Second)
}
func (s *IntegrationTestSuite) TearDownSuite() {
if !s.saveLogs {
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := s.pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
if err := s.pool.Purge(&s.headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
if err := s.network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
}
func (s *IntegrationTestSuite) HandleStats(
suiteName string,
stats *suite.SuiteInformation,
) {
s.stats = stats
}
func (s *IntegrationTestSuite) TestListNodes() {
for namespace, scales := range s.namespaces {
log.Println("Listing nodes")
result, err := ExecuteCommand(
&s.headscale,
[]string{"headscale", "--namespace", namespace, "nodes", "list"},
[]string{},
)
assert.Nil(s.T(), err)
log.Printf("List nodes: \n%s\n", result)
// Check that the correct number of hosts is present in the node list
lines := strings.Split(result, "\n")
assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)
for hostname := range scales.tailscales {
assert.Contains(s.T(), result, hostname)
}
}
}
func (s *IntegrationTestSuite) TestGetIpAddresses() {
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname := range scales.tailscales {
ips := ips[hostname]
for _, ip := range ips {
s.T().Run(hostname, func(t *testing.T) {
assert.NotNil(t, ip)
log.Printf("IP for %s: %s\n", hostname, ip)
// c.Assert(ip.Valid(), check.IsTrue)
assert.True(t, ip.Is4() || ip.Is6())
switch {
case ip.Is4():
assert.True(t, IpPrefix4.Contains(ip))
case ip.Is6():
assert.True(t, IpPrefix6.Contains(ip))
}
})
}
}
}
}
// TODO(kradalby): fix this test
// We need some way to import ipnstate.Status from multiple go packages.
// Currently it will only work with 1.18.x since that is the last
// version we have in go.mod
// func (s *IntegrationTestSuite) TestStatus() {
// for _, scales := range s.namespaces {
// ips, err := getIPs(scales.tailscales)
// assert.Nil(s.T(), err)
//
// for hostname, tailscale := range scales.tailscales {
// s.T().Run(hostname, func(t *testing.T) {
// command := []string{"tailscale", "status", "--json"}
//
// log.Printf("Getting status for %s\n", hostname)
// result, err := ExecuteCommand(
// &tailscale,
// command,
// []string{},
// )
// assert.Nil(t, err)
//
// var status ipnstate.Status
// err = json.Unmarshal([]byte(result), &status)
// assert.Nil(s.T(), err)
//
// // TODO(kradalby): Replace this check with peer length of SAME namespace
// // Check if we have as many nodes in status
// // as we have IPs/tailscales
// // lines := strings.Split(result, "\n")
// // assert.Equal(t, len(ips), len(lines)-1)
// // assert.Equal(t, len(scales.tailscales), len(lines)-1)
//
// peerIps := getIPsfromIPNstate(status)
//
// // Check that all hosts are present in every host's status
// for ipHostname, ip := range ips {
// if hostname != ipHostname {
// assert.Contains(t, peerIps, ip)
// }
// }
// })
// }
// }
// }
func getIPsfromIPNstate(status ipnstate.Status) []netip.Addr {
ips := make([]netip.Addr, 0)
for _, peer := range status.Peer {
ips = append(ips, peer.TailscaleIPs...)
}
return ips
}
// TODO: Adapt test for cross-communication between namespaces
func (s *IntegrationTestSuite) TestPingAllPeersByAddress() {
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, tailscale := range scales.tailscales {
for peername, peerIPs := range ips {
for i, ip := range peerIPs {
// We currently can't ping ourselves, so skip that.
if peername == hostname {
continue
}
s.T().
Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
// We are only interested in a "direct ping", which means we
// might need a couple more attempts before reaching the node.
command := []string{
"tailscale", "ping",
"--timeout=1s",
"--c=10",
"--until-direct=true",
ip.String(),
}
log.Printf(
"Pinging from %s to %s (%s)\n",
hostname,
peername,
ip,
)
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(t, err)
log.Printf("Result for %s: %s\n", hostname, result)
assert.Contains(t, result, "pong")
})
}
}
}
}
}
func (s *IntegrationTestSuite) TestTailDrop() {
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
retry := func(times int, sleepInterval time.Duration, doWork func() error) (err error) {
for attempts := 0; attempts < times; attempts++ {
err = doWork()
if err == nil {
return
}
time.Sleep(sleepInterval)
}
return
}
for hostname, tailscale := range scales.tailscales {
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", hostname)}
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(s.T(), err)
for peername := range ips {
if peername == hostname {
continue
}
var ip4 netip.Addr
for _, ip := range ips[peername] {
if ip.Is4() {
ip4 = ip
break
}
}
if !ip4.IsValid() {
panic("no ipv4 address found")
}
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
command := []string{
"tailscale", "file", "cp",
fmt.Sprintf("/tmp/file_from_%s", hostname),
fmt.Sprintf("%s:", ip4),
}
err := retry(10, 1*time.Second, func() error {
log.Printf(
"Sending file from %s to %s\n",
hostname,
peername,
)
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
ExecuteCommandTimeout(60*time.Second),
)
return err
})
assert.Nil(t, err)
})
}
}
for hostname, tailscale := range scales.tailscales {
command := []string{
"tailscale", "file",
"get",
"/tmp/",
}
_, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(s.T(), err)
for peername, ip := range ips {
if peername == hostname {
continue
}
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
command := []string{
"ls",
fmt.Sprintf("/tmp/file_from_%s", peername),
}
log.Printf(
"Checking file in %s (%s) from %s (%s)\n",
hostname,
ips[hostname][1],
peername,
ip,
)
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(t, err)
log.Printf("Result for %s: %s\n", peername, result)
assert.Equal(
t,
fmt.Sprintf("/tmp/file_from_%s\n", peername),
result,
)
})
}
}
}
}
func (s *IntegrationTestSuite) TestPingAllPeersByHostname() {
hostnames, err := getMagicFQDN(&s.headscale)
assert.Nil(s.T(), err)
log.Printf("Resolved hostnames: %#v", hostnames)
for _, scales := range s.namespaces {
for hostname, tailscale := range scales.tailscales {
for _, peername := range hostnames {
if strings.Contains(peername, hostname) {
continue
}
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
command := []string{
"tailscale", "ping",
"--timeout=10s",
"--c=20",
"--until-direct=true",
peername,
}
log.Printf(
"Pinging using hostname from %s to %s\n",
hostname,
peername,
)
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(t, err)
log.Printf("Result for %s: %s\n", hostname, result)
assert.Contains(t, result, "pong")
})
}
}
}
}
func (s *IntegrationTestSuite) TestMagicDNS() {
hostnames, err := getMagicFQDN(&s.headscale)
assert.Nil(s.T(), err)
log.Printf("Resolved hostnames: %#v", hostnames)
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
retry := func(times int, sleepInterval time.Duration, doWork func() (string, error)) (result string, err error) {
for attempts := 0; attempts < times; attempts++ {
result, err = doWork()
if err == nil {
return
}
time.Sleep(sleepInterval)
}
return
}
for hostname, tailscale := range scales.tailscales {
for _, peername := range hostnames {
if strings.Contains(peername, hostname) {
continue
}
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
command := []string{
"tailscale", "ip", peername,
}
result, err := retry(10, 1*time.Second, func() (string, error) {
log.Printf(
"Resolving name %s from %s\n",
peername,
hostname,
)
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
return result, err
})
assert.Nil(t, err)
log.Printf("Result for %s: %s\n", hostname, result)
peerBaseName := peername[:len(peername)-MachineGivenNameHashLength-1]
expectedAddresses := ips[peerBaseName]
for _, ip := range expectedAddresses {
assert.Contains(t, result, ip.String())
}
})
}
}
}
}
func getAPIURLs(
tailscales map[string]dockertest.Resource,
) (map[netip.Addr]string, error) {
fts := make(map[netip.Addr]string)
for _, tailscale := range tailscales {
command := []string{
"curl",
"--unix-socket",
"/run/tailscale/tailscaled.sock",
"http://localhost/localapi/v0/file-targets",
}
result, err := ExecuteCommand(
&tailscale,
command,
[]string{},
)
if err != nil {
return nil, err
}
var pft []apitype.FileTarget
if err := json.Unmarshal([]byte(result), &pft); err != nil {
return nil, fmt.Errorf("invalid JSON: %w", err)
}
for _, ft := range pft {
n := ft.Node
for _, a := range n.Addresses { // just add all the addresses
if _, ok := fts[a.Addr()]; !ok {
if ft.PeerAPIURL == "" {
return nil, errors.New("api url is empty")
}
fts[a.Addr()] = ft.PeerAPIURL
}
}
}
}
return fts, nil
}

View File

@@ -14,7 +14,6 @@ derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
override_local_dns: true
base_domain: headscale.net
domains: []
magic_dns: true
@@ -29,14 +28,11 @@ ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:18080
log:
level: disabled
format: text
log_level: disabled
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:19090
oidc:
only_start_if_oidc_is_available: true
scope:
- openid
- profile
@@ -46,6 +42,7 @@ private_key_path: private.key
noise:
private_key_path: noise_private.key
server_url: http://headscale:18080
tls_client_auth_mode: relaxed
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock

View File

@@ -1,5 +1,4 @@
log:
level: trace
log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
@@ -8,7 +7,6 @@ ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
dns_config:
override_local_dns: true
base_domain: headscale.net
magic_dns: true
domains: []

View File

@@ -14,7 +14,6 @@ derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
override_local_dns: true
base_domain: headscale.net
domains: []
magic_dns: true
@@ -28,14 +27,11 @@ ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:18080
log:
level: disabled
format: text
log_level: disabled
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:19090
oidc:
only_start_if_oidc_is_available: true
scope:
- openid
- profile
@@ -45,6 +41,7 @@ private_key_path: private.key
noise:
private_key_path: noise_private.key
server_url: http://headscale:18080
tls_client_auth_mode: relaxed
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock

View File

@@ -1,5 +1,4 @@
log:
level: trace
log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
@@ -8,7 +7,6 @@ ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
dns_config:
override_local_dns: true
base_domain: headscale.net
magic_dns: true
domains: []

View File

@@ -14,7 +14,6 @@ derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
dns_config:
override_local_dns: true
base_domain: headscale.net
domains: []
magic_dns: true
@@ -29,14 +28,11 @@ ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
listen_addr: 0.0.0.0:8080
log:
format: text
level: disabled
log_level: disabled
logtail:
enabled: false
metrics_listen_addr: 127.0.0.1:9090
oidc:
only_start_if_oidc_is_available: true
scope:
- openid
- profile
@@ -46,6 +42,7 @@ private_key_path: private.key
noise:
private_key_path: noise_private.key
server_url: http://headscale:8080
tls_client_auth_mode: relaxed
tls_letsencrypt_cache_dir: /var/www/.cache
tls_letsencrypt_challenge_type: HTTP-01
unix_socket: /var/run/headscale.sock

View File

@@ -1,5 +1,4 @@
log:
level: trace
log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
@@ -8,7 +7,6 @@ ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
dns_config:
override_local_dns: true
base_domain: headscale.net
magic_dns: true
domains: []

View File

@@ -1,21 +0,0 @@
log_level: trace
acl_policy_path: ""
db_type: sqlite3
ephemeral_node_inactivity_timeout: 30m
node_update_check_interval: 10s
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
db_path: /tmp/integration_test_db.sqlite3
private_key_path: private.key
noise:
private_key_path: noise_private.key
listen_addr: 0.0.0.0:8443
server_url: https://headscale-oidc:8443
tls_cert_path: "/etc/headscale/tls/server.crt"
tls_key_path: "/etc/headscale/tls/server.key"
derp:
urls:
- https://controlplane.tailscale.com/derpmap/default
auto_update_enabled: true
update_frequency: 1m

View File

@@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC8jCCAdqgAwIBAgIULbu+UbSTMG/LtxooLLh7BgSEyqEwDQYJKoZIhvcNAQEL
BQAwFDESMBAGA1UEAwwJaGVhZHNjYWxlMCAXDTIyMDMwNTE2NDgwM1oYDzI1MjEx
MTA0MTY0ODAzWjAUMRIwEAYDVQQDDAloZWFkc2NhbGUwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDqcfpToLZUF0rlNwXkkt3lbyw4Cl4TJdx36o2PKaOK
U+tze/IjRsCWeMwrcR1o9TNZcxsD+c2J48D1WATuQJlMeg+2UJXGaTGRKkkbPMy3
5m7AFf/Q16UEOgm2NYjZaQ8faRGIMYURG/6sXmNeETJvBixpBev9yKJuVXgqHNS4
NpEkNwdOCuAZXrmw0HCbiusawJOay4tFvhH14rav8Uimonl8UTNVXufMzyUOuoaQ
TGflmzYX3hIoswRnTPlIWFoqObvx2Q8H+of3uQJXy0m8I6OrIoXLNxnqYMfFls79
9SYgVc2jPsCbh5fwyRbx2Hof7sIZ1K/mNgxJRG1E3ZiLAgMBAAGjOjA4MBQGA1Ud
EQQNMAuCCWhlYWRzY2FsZTALBgNVHQ8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUH
AwEwDQYJKoZIhvcNAQELBQADggEBANGlVN7NCsJaKz0k0nhlRGK+tcxn2p1PXN/i
Iy+JX8ahixPC4ocRwOhrXgb390ZXLLwq08HrWYRB/Wi1VUzCp5d8dVxvrR43dJ+v
L2EOBiIKgcu2C3pWW1qRR46/EoXUU9kSH2VNBvIhNufi32kEOidoDzxtQf6qVCoF
guUt1JkAqrynv1UvR/2ZRM/WzM/oJ8qfECwrwDxyYhkqU5Z5jCWg0C6kPIBvNdzt
B0eheWS+ZxVwkePTR4e17kIafwknth3lo+orxVrq/xC+OVM1bGrt2ZyD64ZvEqQl
w6kgbzBdLScAQptWOFThwhnJsg0UbYKimZsnYmjVEuN59TJv92M=
-----END CERTIFICATE-----
(Expires on Nov 4 16:48:03 2521 GMT)

View File

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDqcfpToLZUF0rl
NwXkkt3lbyw4Cl4TJdx36o2PKaOKU+tze/IjRsCWeMwrcR1o9TNZcxsD+c2J48D1
WATuQJlMeg+2UJXGaTGRKkkbPMy35m7AFf/Q16UEOgm2NYjZaQ8faRGIMYURG/6s
XmNeETJvBixpBev9yKJuVXgqHNS4NpEkNwdOCuAZXrmw0HCbiusawJOay4tFvhH1
4rav8Uimonl8UTNVXufMzyUOuoaQTGflmzYX3hIoswRnTPlIWFoqObvx2Q8H+of3
uQJXy0m8I6OrIoXLNxnqYMfFls799SYgVc2jPsCbh5fwyRbx2Hof7sIZ1K/mNgxJ
RG1E3ZiLAgMBAAECggEBALu1Ni/u5Qy++YA8ZcN0s6UXNdhItLmv/q0kZuLQ+9et
CT8VZfFInLndTdsaXenDKLHdryunviFA8SV+q7P2lMbek+Xs735EiyMnMBFWxLIZ
FWNGOeQERGL19QCmLEOmEi2b+iWJQHlKaMWpbPXL3w11a+lKjIBNO4ALfoJ5QveZ
cGMKsJdm/mpqBvLeNeh2eAFk3Gp6sT1g80Ge8NkgyzFBNIqnut0eerM15kPTc6Qz
12JLaOXUuV3PrcB4PN4nOwrTDg88GDNOQtc1Pc9r4nOHyLfr8X7QEtj1wXSwmOuK
d6ynMnAmoxVA9wEnupLbil1bzohRzpsTpkmDruYaBEECgYEA/Z09I8D6mt2NVqIE
KyvLjBK39ijSV9r3/lvB2Ple2OOL5YQEd+yTrIFy+3zdUnDgD1zmNnXjmjvHZ9Lc
IFf2o06AF84QLNB5gLPdDQkGNFdDqUxljBrfAfE3oANmPS/B0SijMGOOOiDO2FtO
xl1nfRr78mswuRs9awoUWCdNRKUCgYEA7KaTYKIQW/FEjw9lshp74q5vbn6zoXF5
7N8VkwI+bBVNvRbM9XZ8qhfgRdu9eXs5oL/N4mSYY54I8fA//pJ0Z2vpmureMm1V
mL5WBUmSD9DIbAchoK+sRiQhVmNMBQC6cHMABA7RfXvBeGvWrm9pKCS6ZLgLjkjp
PsmAcaXQcW8CgYEA2inAxljjOwUK6FNGsrxhxIT1qtNC3kCGxE+6WSNq67gSR8Vg
8qiX//T7LEslOB3RIGYRwxd2St7RkgZZRZllmOWWWuPwFhzf6E7RAL2akLvggGov
kG4tGEagSw2hjVDfsUT73ExHtMk0Jfmlsg33UC8+PDLpHtLH6qQpDAwC8+ECgYEA
o+AqOIWhvHmT11l7O915Ip1WzvZwYADbxLsrDnVEUsZh4epTHjvh0kvcY6PqTqCV
ZIrOANNWb811Nkz/k8NJVoD08PFp0xPBbZeIq/qpachTsfMyRzq/mobUiyUR9Hjv
ooUQYr78NOApNsG+lWbTNBhS9wI4BlzZIECbcJe5g4MCgYEAndRoy8S+S0Hx/S8a
O3hzXeDmivmgWqn8NVD4AKOovpkz4PaIVVQbAQkiNfAx8/DavPvjEKAbDezJ4ECV
j7IsOWtDVI7pd6eF9fTcECwisrda8aUoiOap8AQb48153Vx+g2N4Vy3uH0xJs4cz
TDALZPOBg8VlV+HEFDP43sp9Bf0=
-----END PRIVATE KEY-----

View File

@@ -26,22 +26,15 @@ const (
)
ErrCouldNotConvertMachineInterface = Error("failed to convert machine interface")
ErrHostnameTooLong = Error("Hostname too long")
ErrDifferentRegisteredNamespace = Error(
"machine was previously registered with a different namespace",
)
MachineGivenNameHashLength = 8
MachineGivenNameTrimSize = 2
ErrDifferentRegisteredNamespace = Error("machine was previously registered with a different namespace")
MachineGivenNameHashLength = 8
MachineGivenNameTrimSize = 2
)
const (
maxHostnameLength = 255
)
var (
ExitRouteV4 = netip.MustParsePrefix("0.0.0.0/0")
ExitRouteV6 = netip.MustParsePrefix("::/0")
)
// Machine is a Headscale client.
type Machine struct {
ID uint64 `gorm:"primary_key"`
@@ -332,15 +325,6 @@ func (h *Headscale) ListMachines() ([]Machine, error) {
return machines, nil
}
func (h *Headscale) ListMachinesByGivenName(givenName string) ([]Machine, error) {
machines := []Machine{}
if err := h.db.Preload("AuthKey").Preload("AuthKey.Namespace").Preload("Namespace").Find(&machines).Where("given_name = ?", givenName).Error; err != nil {
return nil, err
}
return machines, nil
}
// GetMachine finds a Machine by name and namespace and returns the Machine struct.
func (h *Headscale) GetMachine(namespace string, name string) (*Machine, error) {
machines, err := h.ListMachinesInNamespace(namespace)
@@ -357,22 +341,6 @@ func (h *Headscale) GetMachine(namespace string, name string) (*Machine, error)
return nil, ErrMachineNotFound
}
// GetMachineByGivenName finds a Machine by given name and namespace and returns the Machine struct.
func (h *Headscale) GetMachineByGivenName(namespace string, givenName string) (*Machine, error) {
machines, err := h.ListMachinesInNamespace(namespace)
if err != nil {
return nil, err
}
for _, m := range machines {
if m.GivenName == givenName {
return &m, nil
}
}
return nil, ErrMachineNotFound
}
// GetMachineByID finds a Machine by ID and returns the Machine struct.
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
m := Machine{}
@@ -598,11 +566,12 @@ func (machines MachinesP) String() string {
func (machines Machines) toNodes(
baseDomain string,
dnsConfig *tailcfg.DNSConfig,
includeRoutes bool,
) ([]*tailcfg.Node, error) {
nodes := make([]*tailcfg.Node, len(machines))
for index, machine := range machines {
node, err := machine.toNode(baseDomain, dnsConfig)
node, err := machine.toNode(baseDomain, dnsConfig, includeRoutes)
if err != nil {
return nil, err
}
@@ -618,6 +587,7 @@ func (machines Machines) toNodes(
func (machine Machine) toNode(
baseDomain string,
dnsConfig *tailcfg.DNSConfig,
includeRoutes bool,
) (*tailcfg.Node, error) {
var nodeKey key.NodePublic
err := nodeKey.UnmarshalText([]byte(NodePublicKeyEnsurePrefix(machine.NodeKey)))
@@ -663,22 +633,10 @@ func (machine Machine) toNode(
[]netip.Prefix{},
addrs...) // we append the node own IP, as it is required by the clients
allowedIPs = append(allowedIPs, machine.EnabledRoutes...)
// TODO(kradalby): This is kind of a hack where we say that
// all the announced routes (except exit) are presented as primary
// routes. This might be problematic if two nodes expose the same route.
// This was added to address an issue where subnet routers stopped working
// when we only populated AllowedIPs.
primaryRoutes := []netip.Prefix{}
if len(machine.EnabledRoutes) > 0 {
for _, route := range machine.EnabledRoutes {
if route == ExitRouteV4 || route == ExitRouteV6 {
continue
}
primaryRoutes = append(primaryRoutes, route)
}
// TODO(kradalby): Needs investigation; we probably don't need this condition
// now that we don't have shared nodes
if includeRoutes {
allowedIPs = append(allowedIPs, machine.EnabledRoutes...)
}
var derp string
@@ -725,17 +683,16 @@ func (machine Machine) toNode(
StableID: tailcfg.StableNodeID(
strconv.FormatUint(machine.ID, Base10),
), // in headscale, unlike tailcontrol server, IDs are permanent
Name: hostname,
User: tailcfg.UserID(machine.NamespaceID),
Key: nodeKey,
KeyExpiry: keyExpiry,
Machine: machineKey,
DiscoKey: discoKey,
Addresses: addrs,
AllowedIPs: allowedIPs,
PrimaryRoutes: primaryRoutes,
Endpoints: machine.Endpoints,
DERP: derp,
Name: hostname,
User: tailcfg.UserID(machine.NamespaceID),
Key: nodeKey,
KeyExpiry: keyExpiry,
Machine: machineKey,
DiscoKey: discoKey,
Addresses: addrs,
AllowedIPs: allowedIPs,
Endpoints: machine.Endpoints,
DERP: derp,
Online: &online,
Hostinfo: hostInfo.View(),
@@ -744,11 +701,7 @@ func (machine Machine) toNode(
KeepAlive: true,
MachineAuthorized: !machine.isExpired(),
Capabilities: []string{
tailcfg.CapabilityFileSharing,
tailcfg.CapabilityAdmin,
tailcfg.CapabilitySSH,
},
Capabilities: []string{tailcfg.CapabilityFileSharing},
}
return &node, nil
@@ -843,13 +796,7 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
namespaceName string,
registrationMethod string,
) (*Machine, error) {
nodeKey := key.NodePublic{}
err := nodeKey.UnmarshalText([]byte(nodeKeyStr))
if err != nil {
return nil, err
}
if machineInterface, ok := h.registrationCache.Get(NodePublicKeyStripPrefix(nodeKey)); ok {
if machineInterface, ok := h.registrationCache.Get(nodeKeyStr); ok {
if registrationMachine, ok := machineInterface.(Machine); ok {
namespace, err := h.GetNamespace(namespaceName)
if err != nil {
@@ -860,8 +807,7 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
}
// Registration of expired machine with different namespace
if registrationMachine.ID != 0 &&
registrationMachine.NamespaceID != namespace.ID {
if registrationMachine.ID != 0 && registrationMachine.NamespaceID != namespace.ID {
return nil, ErrDifferentRegisteredNamespace
}
@@ -984,64 +930,6 @@ func (h *Headscale) EnableRoutes(machine *Machine, routeStrs ...string) error {
return nil
}
// Enable any routes advertised by a machine that match the ACL autoApprovers policy.
func (h *Headscale) EnableAutoApprovedRoutes(machine *Machine) {
if len(machine.IPAddresses) == 0 {
return // This machine has no IPAddresses, so can't possibly match any autoApprovers ACLs
}
approvedRoutes := make([]netip.Prefix, 0, len(machine.HostInfo.RoutableIPs))
thisMachine := []Machine{*machine}
for _, advertisedRoute := range machine.HostInfo.RoutableIPs {
if contains(machine.EnabledRoutes, advertisedRoute) {
continue // Skip routes that are already enabled for the node
}
routeApprovers, err := h.aclPolicy.AutoApprovers.GetRouteApprovers(
advertisedRoute,
)
if err != nil {
log.Err(err).
Str("advertisedRoute", advertisedRoute.String()).
Uint64("machineId", machine.ID).
Msg("Failed to resolve autoApprovers for advertised route")
return
}
for _, approvedAlias := range routeApprovers {
if approvedAlias == machine.Namespace.Name {
approvedRoutes = append(approvedRoutes, advertisedRoute)
} else {
approvedIps, err := expandAlias(thisMachine, *h.aclPolicy, approvedAlias, h.cfg.OIDC.StripEmaildomain)
if err != nil {
log.Err(err).
Str("alias", approvedAlias).
Msg("Failed to expand alias when processing autoApprovers policy")
return
}
// approvedIps should contain all of the machine's IPs if it matches the rule, so check the first one
if contains(approvedIps, machine.IPAddresses[0].String()) {
approvedRoutes = append(approvedRoutes, advertisedRoute)
}
}
}
}
for _, approvedRoute := range approvedRoutes {
if !contains(machine.EnabledRoutes, approvedRoute) {
log.Info().
Str("route", approvedRoute.String()).
Uint64("client", machine.ID).
Msg("Enabling autoApproved route for client")
machine.EnabledRoutes = append(machine.EnabledRoutes, approvedRoute)
}
}
}
func (machine *Machine) RoutesToProto() *v1.Routes {
availableRoutes := machine.GetAdvertisedRoutes()
@@ -1053,7 +941,11 @@ func (machine *Machine) RoutesToProto() *v1.Routes {
}
}
func (h *Headscale) generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
func (h *Headscale) GenerateGivenName(suppliedName string) (string, error) {
// If a hostname is or will be longer than 63 chars after adding the hash,
// it needs to be trimmed.
trimmedHostnameLength := labelHostnameLength - MachineGivenNameHashLength - MachineGivenNameTrimSize
normalizedHostname, err := NormalizeToFQDNRules(
suppliedName,
h.cfg.OIDC.StripEmaildomain,
@@ -1062,46 +954,18 @@ func (h *Headscale) generateGivenName(suppliedName string, randomSuffix bool) (s
return "", err
}
if randomSuffix {
// Trim if a hostname will be longer than 63 chars after adding the hash.
trimmedHostnameLength := labelHostnameLength - MachineGivenNameHashLength - MachineGivenNameTrimSize
if len(normalizedHostname) > trimmedHostnameLength {
normalizedHostname = normalizedHostname[:trimmedHostnameLength]
}
postfix, err := GenerateRandomStringDNSSafe(MachineGivenNameHashLength)
if err != nil {
return "", err
}
suffix, err := GenerateRandomStringDNSSafe(MachineGivenNameHashLength)
if err != nil {
return "", err
}
normalizedHostname += "-" + suffix
// Verify that the new unique name is shorter than the maximum allowed
// DNS segment.
if len(normalizedHostname) <= trimmedHostnameLength {
normalizedHostname = fmt.Sprintf("%s-%s", normalizedHostname, postfix)
} else {
normalizedHostname = fmt.Sprintf("%s-%s", normalizedHostname[:trimmedHostnameLength], postfix)
}
return normalizedHostname, nil
}
func (h *Headscale) GenerateGivenName(machineKey string, suppliedName string) (string, error) {
givenName, err := h.generateGivenName(suppliedName, false)
if err != nil {
return "", err
}
// Tailscale rules (may differ) https://tailscale.com/kb/1098/machine-names/
machines, err := h.ListMachinesByGivenName(givenName)
if err != nil {
return "", err
}
for _, machine := range machines {
if machine.MachineKey != machineKey && machine.GivenName == givenName {
postfixedName, err := h.generateGivenName(suppliedName, true)
if err != nil {
return "", err
}
givenName = postfixedName
}
}
return givenName, nil
}

View File

@@ -4,8 +4,8 @@ import (
"fmt"
"net/netip"
"reflect"
"regexp"
"strconv"
"strings"
"testing"
"time"
@@ -18,7 +18,7 @@ func (s *Suite) TestGetMachine(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("test", "testmachine")
@@ -44,7 +44,7 @@ func (s *Suite) TestGetMachineByID(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachineByID(0)
@@ -70,7 +70,7 @@ func (s *Suite) TestGetMachineByNodeKey(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachineByID(0)
@@ -98,7 +98,7 @@ func (s *Suite) TestGetMachineByAnyNodeKey(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachineByID(0)
@@ -171,7 +171,7 @@ func (s *Suite) TestListPeers(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachineByID(0)
@@ -214,7 +214,7 @@ func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
for _, name := range []string{"test", "admin"} {
namespace, err := app.CreateNamespace(name)
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
stor = append(stor, base{namespace, pak})
}
@@ -294,7 +294,7 @@ func (s *Suite) TestExpireMachine(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("test", "testmachine")
@@ -346,55 +346,11 @@ func (s *Suite) TestSerdeAddressStrignSlice(c *check.C) {
}
}
func (s *Suite) TestGenerateGivenName(c *check.C) {
namespace1, err := app.CreateNamespace("namespace-1")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace1.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("namespace-1", "testmachine")
c.Assert(err, check.NotNil)
machine := &Machine{
ID: 0,
MachineKey: "machine-key-1",
NodeKey: "node-key-1",
DiscoKey: "disco-key-1",
Hostname: "hostname-1",
GivenName: "hostname-1",
NamespaceID: namespace1.ID,
RegisterMethod: RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
}
app.db.Save(machine)
givenName, err := app.GenerateGivenName("machine-key-2", "hostname-2")
comment := check.Commentf("Same namespace, unique machines, unique hostnames, no conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Equals, "hostname-2", comment)
givenName, err = app.GenerateGivenName("machine-key-1", "hostname-1")
comment = check.Commentf("Same namespace, same machine, same hostname, no conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Equals, "hostname-1", comment)
givenName, err = app.GenerateGivenName("machine-key-2", "hostname-1")
comment = check.Commentf("Same namespace, unique machines, same hostname, conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", MachineGivenNameHashLength), comment)
givenName, err = app.GenerateGivenName("machine-key-2", "hostname-1")
comment = check.Commentf("Unique namespaces, unique machines, same hostname, conflict")
c.Assert(err, check.IsNil, comment)
c.Assert(givenName, check.Matches, fmt.Sprintf("^hostname-1-[a-z0-9]{%d}$", MachineGivenNameHashLength), comment)
}
func (s *Suite) TestSetTags(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = app.GetMachine("test", "testmachine")
@@ -584,6 +540,7 @@ func Test_getTags(t *testing.T) {
}
}
// nolint
func Test_getFilteredByACLPeers(t *testing.T) {
type args struct {
machines []Machine
@@ -961,16 +918,15 @@ func Test_getFilteredByACLPeers(t *testing.T) {
}
}
func TestHeadscale_generateGivenName(t *testing.T) {
func TestHeadscale_GenerateGivenName(t *testing.T) {
type args struct {
suppliedName string
randomSuffix bool
}
tests := []struct {
name string
h *Headscale
args args
want *regexp.Regexp
want string
wantErr bool
}{
{
@@ -984,9 +940,8 @@ func TestHeadscale_generateGivenName(t *testing.T) {
},
args: args{
suppliedName: "testmachine",
randomSuffix: false,
},
want: regexp.MustCompile("^testmachine$"),
want: "testmachine",
wantErr: false,
},
{
@@ -1000,9 +955,23 @@ func TestHeadscale_generateGivenName(t *testing.T) {
},
args: args{
suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine",
randomSuffix: false,
},
want: regexp.MustCompile("^testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine$"),
want: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine",
wantErr: false,
},
{
name: "machine name with 60 chars",
h: &Headscale{
cfg: &Config{
OIDC: OIDCConfig{
StripEmaildomain: true,
},
},
},
args: args{
suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine1234567",
},
want: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine",
wantErr: false,
},
{
@@ -1015,10 +984,9 @@ func TestHeadscale_generateGivenName(t *testing.T) {
},
},
args: args{
suppliedName: "machineeee12345678901234567890123456789012345678901234567890123",
randomSuffix: false,
suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine1234567890",
},
want: regexp.MustCompile("^machineeee12345678901234567890123456789012345678901234567890123$"),
want: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
wantErr: false,
},
{
@@ -1031,11 +999,10 @@ func TestHeadscale_generateGivenName(t *testing.T) {
},
},
args: args{
suppliedName: "machineeee123456789012345678901234567890123456789012345678901234",
randomSuffix: false,
suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine1234567891",
},
want: nil,
wantErr: true,
want: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
wantErr: false,
},
{
name: "machine name with 73 chars",
@@ -1047,48 +1014,15 @@ func TestHeadscale_generateGivenName(t *testing.T) {
},
},
args: args{
suppliedName: "machineeee123456789012345678901234567890123456789012345678901234567890123",
randomSuffix: false,
suppliedName: "testmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaachine12345678901234567890",
},
want: nil,
want: "",
wantErr: true,
},
{
name: "machine name with random suffix",
h: &Headscale{
cfg: &Config{
OIDC: OIDCConfig{
StripEmaildomain: true,
},
},
},
args: args{
suppliedName: "test",
randomSuffix: true,
},
want: regexp.MustCompile(fmt.Sprintf("^test-[a-z0-9]{%d}$", MachineGivenNameHashLength)),
wantErr: false,
},
{
name: "machine name with 63 chars with random suffix",
h: &Headscale{
cfg: &Config{
OIDC: OIDCConfig{
StripEmaildomain: true,
},
},
},
args: args{
suppliedName: "machineeee12345678901234567890123456789012345678901234567890123",
randomSuffix: true,
},
want: regexp.MustCompile(fmt.Sprintf("^machineeee1234567890123456789012345678901234567890123-[a-z0-9]{%d}$", MachineGivenNameHashLength)),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.h.generateGivenName(tt.args.suppliedName, tt.args.randomSuffix)
got, err := tt.h.GenerateGivenName(tt.args.suppliedName)
if (err != nil) != tt.wantErr {
t.Errorf(
"Headscale.GenerateGivenName() error = %v, wantErr %v",
@@ -1099,9 +1033,9 @@ func TestHeadscale_generateGivenName(t *testing.T) {
return
}
if tt.want != nil && !tt.want.MatchString(got) {
if tt.want != "" && strings.Contains(tt.want, got) {
t.Errorf(
"Headscale.GenerateGivenName() = %v, does not match %v",
"Headscale.GenerateGivenName() = %v, is not a substring of %v",
tt.want,
got,
)
@@ -1117,45 +1051,3 @@ func TestHeadscale_generateGivenName(t *testing.T) {
})
}
}
func (s *Suite) TestAutoApproveRoutes(c *check.C) {
err := app.LoadACLPolicy("./tests/acls/acl_policy_autoapprovers.hujson")
c.Assert(err, check.IsNil)
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)
nodeKey := key.NewNode()
defaultRoute := netip.MustParsePrefix("0.0.0.0/0")
route1 := netip.MustParsePrefix("10.10.0.0/16")
// Check if a subprefix of an autoapproved route is approved
route2 := netip.MustParsePrefix("10.11.0.0/24")
machine := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: "faa",
Hostname: "test",
NamespaceID: namespace.ID,
RegisterMethod: RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: HostInfo{
RequestTags: []string{"tag:exit"},
RoutableIPs: []netip.Prefix{defaultRoute, route1, route2},
},
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
}
app.db.Save(&machine)
machine0ByID, err := app.GetMachineByID(0)
c.Assert(err, check.IsNil)
app.EnableAutoApprovedRoutes(machine0ByID)
c.Assert(machine0ByID.GetEnabledRoutes(), check.HasLen, 3)
}
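TestAutoApproveRoutes expects all three advertised prefixes to be enabled, including 10.11.0.0/24, which the inline comment calls out as only a sub-prefix of an auto-approved route; the acl_policy_autoapprovers.hujson fixture itself is not shown in this diff, and the 0.0.0.0/0 exit route is presumably matched through the tag:exit request tag. Below is a minimal sketch of the kind of sub-prefix containment check involved, using only net/netip; the approved prefix 10.0.0.0/8 is an assumed stand-in for whatever the fixture actually approves:

package main

import (
	"fmt"
	"net/netip"
)

// coveredBy reports whether route falls entirely inside approved: the
// approved prefix contains route's network address and route is at least
// as specific (has an equal or longer prefix length).
func coveredBy(route, approved netip.Prefix) bool {
	return approved.Contains(route.Masked().Addr()) && route.Bits() >= approved.Bits()
}

func main() {
	// 10.0.0.0/8 stands in for an auto-approved route from the ACL policy;
	// the real fixture's contents are an assumption here.
	approved := netip.MustParsePrefix("10.0.0.0/8")

	for _, s := range []string{"10.10.0.0/16", "10.11.0.0/24", "192.168.0.0/24"} {
		route := netip.MustParsePrefix(s)
		fmt.Printf("%s covered by %s: %v\n", route, approved, coveredBy(route, approved))
	}
}

Checking the masked base address together with the prefix lengths is sufficient here, because a more specific prefix whose network address lies inside the approved range cannot extend beyond it.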
View File
@@ -148,6 +148,21 @@ func (h *Headscale) ListNamespaces() ([]Namespace, error) {
return namespaces, nil
}
func (h *Headscale) ListNamespacesStr() ([]string, error) {
namespaces, err := h.ListNamespaces()
if err != nil {
return []string{}, err
}
namespaceStrs := make([]string, len(namespaces))
for index, namespace := range namespaces {
namespaceStrs[index] = namespace.Name
}
return namespaceStrs, nil
}
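ListNamespacesStr is a thin convenience wrapper over ListNamespaces that returns only the names. A short sketch of the calling pattern it appears intended for, e.g. printing names for CLI output or shell completion; the namespaceLister interface and fakeApp stub below are illustrative stand-ins, not headscale types:

package main

import (
	"fmt"
	"sort"
)

// namespaceLister is a narrow stand-in for the *Headscale method added above;
// any type with a ListNamespacesStr() ([]string, error) method satisfies it.
type namespaceLister interface {
	ListNamespacesStr() ([]string, error)
}

// fakeApp is a stub used only to make the sketch runnable.
type fakeApp struct{ names []string }

func (f fakeApp) ListNamespacesStr() ([]string, error) { return f.names, nil }

// printNamespaces shows the intended call pattern: fetch the names once,
// sort them for stable output, and print them.
func printNamespaces(app namespaceLister) error {
	names, err := app.ListNamespacesStr()
	if err != nil {
		return err
	}
	sort.Strings(names)
	for _, name := range names {
		fmt.Println(name)
	}
	return nil
}

func main() {
	_ = printNamespaces(fakeApp{names: []string{"test", "admin"}})
}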
// ListMachinesInNamespace gets all the nodes in a given namespace.
func (h *Headscale) ListMachinesInNamespace(name string) ([]Machine, error) {
err := CheckForFQDNRules(name)
@@ -211,10 +226,7 @@ func (n *Namespace) toLogin() *tailcfg.Login {
return &login
}
func (h *Headscale) getMapResponseUserProfiles(
machine Machine,
peers Machines,
) []tailcfg.UserProfile {
func getMapResponseUserProfiles(machine Machine, peers Machines) []tailcfg.UserProfile {
namespaceMap := make(map[string]Namespace)
namespaceMap[machine.Namespace.Name] = machine.Namespace
for _, peer := range peers {
@@ -223,17 +235,11 @@ func (h *Headscale) getMapResponseUserProfiles(
profiles := []tailcfg.UserProfile{}
for _, namespace := range namespaceMap {
displayName := namespace.Name
if h.cfg.BaseDomain != "" {
displayName = fmt.Sprintf("%s@%s", namespace.Name, h.cfg.BaseDomain)
}
profiles = append(profiles,
tailcfg.UserProfile{
ID: tailcfg.UserID(namespace.ID),
LoginName: namespace.Name,
DisplayName: displayName,
DisplayName: namespace.Name,
})
}
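Both sides of this hunk build the user-profile list the same way: the machine's namespace and the namespaces of all peers are deduplicated in a map keyed by name, and one tailcfg.UserProfile is emitted per distinct namespace; the sides differ only in whether DisplayName is the bare namespace name or namespace@BaseDomain. A stripped-down sketch of that dedupe-then-emit pattern; Namespace, Machine and Profile here are simplified placeholders rather than the headscale/tailcfg types:

package main

import "fmt"

// Simplified placeholders for the sketch; the real code uses headscale's
// Machine/Namespace types and tailcfg.UserProfile.
type Namespace struct {
	ID   uint
	Name string
}

type Machine struct {
	Namespace Namespace
}

type Profile struct {
	ID          uint
	LoginName   string
	DisplayName string
}

// userProfiles deduplicates namespaces via a map keyed by name, then builds
// one profile per distinct namespace. An empty baseDomain gives the plain
// display name; a non-empty one gives the name@domain form shown on the
// other side of the hunk.
func userProfiles(machine Machine, peers []Machine, baseDomain string) []Profile {
	namespaceMap := map[string]Namespace{machine.Namespace.Name: machine.Namespace}
	for _, peer := range peers {
		namespaceMap[peer.Namespace.Name] = peer.Namespace
	}

	profiles := []Profile{}
	for _, namespace := range namespaceMap {
		displayName := namespace.Name
		if baseDomain != "" {
			displayName = fmt.Sprintf("%s@%s", namespace.Name, baseDomain)
		}
		profiles = append(profiles, Profile{
			ID:          namespace.ID,
			LoginName:   namespace.Name,
			DisplayName: displayName,
		})
	}

	return profiles
}

func main() {
	ns := Namespace{ID: 1, Name: "test"}
	machine := Machine{Namespace: ns}
	peers := []Machine{{Namespace: ns}, {Namespace: Namespace{ID: 2, Name: "admin"}}}
	for _, p := range userProfiles(machine, peers, "example.com") {
		fmt.Printf("%+v\n", p)
	}
}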
Some files were not shown because too many files have changed in this diff.