Compare commits

..

1 Commit

Author: Juan Font
SHA1: 422d124c7e
Message: Do not explicitly set the protocols when ommited in ACL
Date: 2022-12-05 19:12:33 +00:00
303 changed files with 21358 additions and 35535 deletions
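
The commit above changes how headscale handles ACL rules whose protocol field is left out: per the commit title, when "proto" is omitted the protocols are no longer set explicitly, presumably so the rule applies to any protocol. A minimal sketch of such a policy, assuming the HuJSON policy format with action/src/dst/proto fields; the group and tag names are invented for the example:

```bash
# Hypothetical policy file: the first rule omits "proto" and should match any
# protocol; the second sets it explicitly and is restricted to TCP.
# (Field names assume the HuJSON policy format; the names are made up.)
cat > acl-example.hujson <<'EOF'
{
  "acls": [
    // "proto" omitted: applies to any protocol
    { "action": "accept", "src": ["group:example-admins"], "dst": ["*:*"] },
    // "proto" set explicitly: TCP only
    { "action": "accept", "proto": "tcp", "src": ["group:example-web"], "dst": ["tag:example-server:443"] }
  ]
}
EOF
```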

3
.github/FUNDING.yml vendored

@@ -1,3 +0,0 @@
# These are supported funding model platforms
ko_fi: headscale


@@ -6,24 +6,19 @@ labels: ["bug"]
assignees: ""
---
<!--
Before posting a bug report, discuss the behaviour you are expecting with the Discord community
to make sure that it is truly a bug.
The issue tracker is not the place to ask for support or how to set up Headscale.
<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the bug report in this language. -->
Bug reports without sufficient information will be closed.
Headscale is a multinational community across the globe. Our language is English.
All bug reports need to be in English.
-->
## Bug description
**Bug description**
<!-- A clear and concise description of what the bug is. Describe the expected behavior
and how it is currently different. If you are unsure if it is a bug, consider discussing
it on our Discord server first. -->
## Environment
**To Reproduce**
<!-- Steps to reproduce the behavior. -->
**Context info**
<!-- Please add relevant information about your system. For example:
- Version of headscale used
@@ -33,33 +28,3 @@ All bug reports needs to be in English.
- The relevant config parameters you used
- Log output
-->
- OS:
- Headscale version:
- Tailscale version:
<!--
We do not support running Headscale in a container nor behind a (reverse) proxy.
If either of these are true for your environment, ask the community in Discord
instead of filing a bug report.
-->
- [ ] Headscale is behind a (reverse) proxy
- [ ] Headscale runs in a container
## To Reproduce
<!-- Steps to reproduce the behavior. -->
## Logs and attachments
<!-- Please attach files with:
- Client netmap dump (see below)
- ACL configuration
- Headscale configuration
Dump the netmap of tailscale clients:
`tailscale debug netmap > DESCRIPTIVE_NAME.json`
Please provide information describing the netmap, which client, which headscale version etc.
-->
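
The template above asks for one netmap dump per client plus a note on which client and which headscale version each file belongs to. A small example of collecting those attachments, with made-up file names:

```bash
# One dump per tailscale client; the descriptive names here are only examples.
tailscale debug netmap > laptop-debian-headscale-0.19.0.json
tailscale debug netmap > nas-ubuntu-headscale-0.19.0.json
```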


@@ -6,21 +6,12 @@ labels: ["enhancement"]
assignees: ""
---
<!--
We typically have a clear roadmap for what we want to improve and reserve the right
to close feature requests that do not fit the roadmap or the scope of the project,
or that we actually want to implement ourselves.
<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the feature request in this language. -->
Headscale is a multinational community across the globe. Our language is English.
All bug reports need to be in English.
-->
## Why
<!-- Include the reason why you would need the feature. E.g. what problem
does it solve? Or which workflow is currently frustrating and will be improved by
this? -->
## Description
**Feature request**
<!-- A clear and precise description of what new or changed feature you want. -->
<!-- Please include the reason why you would need the feature. E.g. what problem
does it solve? Or which workflow is currently frustrating and will be improved by
this? -->

30
.github/ISSUE_TEMPLATE/other_issue.md vendored Normal file

@@ -0,0 +1,30 @@
---
name: "Other issue"
about: "Report a different issue"
title: ""
labels: ["bug"]
assignees: ""
---
<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the issue in this language. -->
<!-- If you have a question, please consider using our Discord for asking questions -->
**Issue description**
<!-- Please add your issue description. -->
**To Reproduce**
<!-- Steps to reproduce the behavior. -->
**Context info**
<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->


@@ -1,15 +1,3 @@
<!--
Headscale is "Open Source, acknowledged contribution", which means that any
contribution will have to be discussed with the Maintainers before being submitted.
This model has been chosen to reduce the risk of burnout by limiting the
maintenance overhead of reviewing and validating third-party code.
Headscale is open to code contributions for bug fixes without discussion.
If you find mistakes in the documentation, please submit a fix to the documentation.
-->
<!-- Please tick if the following things apply. You… -->
- [ ] read the [CONTRIBUTING guidelines](README.md#contributing)

26
.github/renovate.json vendored

@@ -6,27 +6,31 @@
"onboarding": false,
"extends": ["config:base", ":rebaseStalePrs"],
"ignorePresets": [":prHourlyLimit2"],
"enabledManagers": ["dockerfile", "gomod", "github-actions", "regex"],
"enabledManagers": ["dockerfile", "gomod", "github-actions","regex" ],
"includeForks": true,
"repositories": ["juanfont/headscale"],
"platform": "github",
"packageRules": [
{
"matchDatasources": ["go"],
"groupName": "Go modules",
"groupSlug": "gomod",
"separateMajorMinor": false
"matchDatasources": ["go"],
"groupName": "Go modules",
"groupSlug": "gomod",
"separateMajorMinor": false
},
{
"matchDatasources": ["docker"],
"groupName": "Dockerfiles",
"groupSlug": "dockerfiles"
}
"matchDatasources": ["docker"],
"groupName": "Dockerfiles",
"groupSlug": "dockerfiles"
}
],
"regexManagers": [
{
"fileMatch": [".github/workflows/.*.yml$"],
"matchStrings": ["\\s*go-version:\\s*\"?(?<currentValue>.*?)\"?\\n"],
"fileMatch": [
".github/workflows/.*.yml$"
],
"matchStrings": [
"\\s*go-version:\\s*\"?(?<currentValue>.*?)\"?\\n"
],
"datasourceTemplate": "golang-version",
"depNameTemplate": "actions/go-version"
}
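
The regexManagers entry in the renovate.json diff above tells Renovate to track the Go version pinned via go-version: lines in the workflow files. A rough shell approximation of what that pattern picks up, purely for illustration (this is not how Renovate itself evaluates matchStrings):

```bash
# List the go-version pins in the workflow files that the regex manager targets.
grep -R --include='*.yml' -h 'go-version:' .github/workflows/ \
  | sed -E 's/.*go-version:[[:space:]]*"?([^"]*)"?.*/\1/'
# A line such as `go-version: "1.19.3"` yields 1.19.3 as currentValue, which
# Renovate then checks against the golang-version datasource.
```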


@@ -8,14 +8,9 @@ on:
branches:
- main
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
permissions: write-all
steps:
- uses: actions/checkout@v3
@@ -33,40 +28,14 @@ jobs:
integration_test/
config-example.yaml
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.any_changed == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run build
id: build
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix build |& tee build-result
BUILD_STATUS="${PIPESTATUS[0]}"
run: nix build
OLD_HASH=$(cat build-result | grep specified: | awk -F ':' '{print $2}' | sed 's/ //g')
NEW_HASH=$(cat build-result | grep got: | awk -F ':' '{print $2}' | sed 's/ //g')
echo "OLD_HASH=$OLD_HASH" >> $GITHUB_OUTPUT
echo "NEW_HASH=$NEW_HASH" >> $GITHUB_OUTPUT
exit $BUILD_STATUS
- name: Nix gosum diverging
uses: actions/github-script@v6
if: failure() && steps.build.outcome == 'failure'
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
github.rest.pulls.createReviewComment({
pull_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'
})
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v2
if: steps.changed-files.outputs.any_changed == 'true'
with:
name: headscale-linux
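
The OLD_HASH/NEW_HASH extraction shown in the build workflow diff above parses nix's hash-mismatch error so the follow-up comment can suggest the new vendorSha256 for flake.nix. A standalone sketch of that parsing against a made-up build-result log of the same shape (the hashes and store path are invented):

```bash
cat > build-result <<'EOF'
error: hash mismatch in fixed-output derivation '/nix/store/example-headscale-go-modules.drv':
         specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
            got:    sha256-BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB=
EOF

# Same pipeline as the workflow step: second ':'-separated field, spaces stripped.
OLD_HASH=$(cat build-result | grep specified: | awk -F ':' '{print $2}' | sed 's/ //g')
NEW_HASH=$(cat build-result | grep got: | awk -F ':' '{print $2}' | sed 's/ //g')
echo "OLD_HASH=$OLD_HASH"  # the hash currently pinned in flake.nix
echo "NEW_HASH=$NEW_HASH"  # the hash to put into flake.nix instead
```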


@@ -1,45 +0,0 @@
name: Build documentation
on:
push:
branches:
- main
workflow_dispatch:
permissions:
contents: read
pages: write
id-token: write
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install python
uses: actions/setup-python@v4
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@v2
with:
key: ${{ github.ref }}
path: .cache
- name: Setup dependencies
run: pip install -r docs/requirements.txt
- name: Build docs
run: mkdocs build --strict
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
with:
path: ./site
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1


@@ -3,10 +3,6 @@ name: Lint
on: [push, pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
golangci-lint:
runs-on: ubuntu-latest
@@ -30,7 +26,7 @@ jobs:
if: steps.changed-files.outputs.any_changed == 'true'
uses: golangci/golangci-lint-action@v2
with:
version: v1.51.2
version: v1.49.0
# Only block PRs on new problems.
# If this is not enabled, we will end up having PRs
@@ -63,7 +59,7 @@ jobs:
- name: Prettify code
if: steps.changed-files.outputs.any_changed == 'true'
uses: creyD/prettier_action@v4.3
uses: creyD/prettier_action@v4.0
with:
prettier_options: >-
--check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}


@@ -16,23 +16,138 @@ jobs:
with:
fetch-depth: 0
- uses: cachix/install-nix-action@v16
- name: Run goreleaser
run: nix develop --command -- goreleaser release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
docker-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set up QEMU for multiple platforms
uses: docker/setup-qemu-action@master
with:
platforms: arm64,amd64
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Docker meta
id: meta
uses: docker/metadata-action@v3
with:
# list of Docker images to use as base name for tags
images: |
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
type=raw,value=develop
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
push: true
context: .
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
build-args: |
VERSION=${{ steps.meta.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Run goreleaser
run: nix develop --command -- goreleaser release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
docker-debug-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set up QEMU for multiple platforms
uses: docker/setup-qemu-action@master
with:
platforms: arm64,amd64
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache-debug
key: ${{ runner.os }}-buildx-debug-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-debug-
- name: Docker meta
id: meta-debug
uses: docker/metadata-action@v3
with:
# list of Docker images to use as base name for tags
images: |
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
flavor: |
suffix=-debug,onlatest=true
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
type=raw,value=develop
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
push: true
context: .
file: Dockerfile.debug
tags: ${{ steps.meta-debug.outputs.tags }}
labels: ${{ steps.meta-debug.outputs.labels }}
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache-debug
cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
build-args: |
VERSION=${{ steps.meta-debug.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache-debug
mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug
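
For a hypothetical v0.20.0 release, the two metadata-action configurations above would produce image tags roughly like the ones below on ghcr.io (the Docker Hub image name depends on a secret and is left out here; the version number is only an example):

```bash
docker pull ghcr.io/juanfont/headscale:0.20.0        # type=semver,pattern={{version}}
docker pull ghcr.io/juanfont/headscale:0.20          # type=semver,pattern={{major}}.{{minor}}
docker pull ghcr.io/juanfont/headscale:0.20.0-debug  # same tag from the job with the -debug flavor suffix
docker pull ghcr.io/juanfont/headscale:develop-debug # type=raw "develop" tag with the -debug suffix
```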


@@ -1,22 +0,0 @@
name: Close inactive issues
on:
schedule:
- cron: "30 1 * * *"
jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v5
with:
days-before-issue-stale: 90
days-before-issue-close: 7
stale-issue-label: "stale"
stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
repo-token: ${{ secrets.GITHUB_TOKEN }}


@@ -0,0 +1,35 @@
name: Integration Test CLI
on: [pull_request]
jobs:
integration-test-cli:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_cli


@@ -0,0 +1,35 @@
name: Integration Test DERP
on: [pull_request]
jobs:
integration-test-derp:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run Embedded DERP server integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_derp


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLAllowStarDst
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLAllowStarDst:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLAllowStarDst
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLAllowStarDst$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"
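
Each of the generated per-test workflow files in this compare carries the same header comment; following it, the whole set is regenerated with something like:

```bash
# Minimal sketch, paths as stated in the generated files' header comment.
cd cmd/gh-action-integration-generator
go generate
```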


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLAllowUser80Dst
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLAllowUser80Dst:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLAllowUser80Dst
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLAllowUser80Dst$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLAllowUserDst
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLAllowUserDst:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLAllowUserDst
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLAllowUserDst$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLDenyAllPort80
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLDenyAllPort80:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLDenyAllPort80
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLDenyAllPort80$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLDevice1CanAccessDevice2
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLDevice1CanAccessDevice2:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLDevice1CanAccessDevice2
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLDevice1CanAccessDevice2$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLHostsInNetMapTable
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLHostsInNetMapTable:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLHostsInNetMapTable
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLHostsInNetMapTable$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLNamedHostsCanReach
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLNamedHostsCanReach:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLNamedHostsCanReach
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLNamedHostsCanReach$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestACLNamedHostsCanReachBySubnet
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestACLNamedHostsCanReachBySubnet:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestACLNamedHostsCanReachBySubnet
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLNamedHostsCanReachBySubnet$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestApiKeyCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestApiKeyCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestApiKeyCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestApiKeyCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthKeyLogoutAndRelogin
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestAuthKeyLogoutAndRelogin:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestAuthKeyLogoutAndRelogin
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthKeyLogoutAndRelogin$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthWebFlowAuthenticationPingAll
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestAuthWebFlowAuthenticationPingAll:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestAuthWebFlowAuthenticationPingAll
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthWebFlowAuthenticationPingAll$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestAuthWebFlowLogoutAndRelogin
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestAuthWebFlowLogoutAndRelogin:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestAuthWebFlowLogoutAndRelogin
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestAuthWebFlowLogoutAndRelogin$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestCreateTailscale
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestCreateTailscale:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestCreateTailscale
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestCreateTailscale$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestDERPServerScenario
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestDERPServerScenario:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestDERPServerScenario
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestDERPServerScenario$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEnableDisableAutoApprovedRoute
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestEnableDisableAutoApprovedRoute:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestEnableDisableAutoApprovedRoute
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEnableDisableAutoApprovedRoute$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEnablingRoutes
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestEnablingRoutes:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestEnablingRoutes
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEnablingRoutes$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEphemeral
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestEphemeral:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestEphemeral
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEphemeral$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestExpireNode
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestExpireNode:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestExpireNode
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestExpireNode$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestHASubnetRouterFailover
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestHASubnetRouterFailover:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestHASubnetRouterFailover
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestHASubnetRouterFailover$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestHeadscale
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestHeadscale:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestHeadscale
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestHeadscale$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagNoACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagNoACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagNoACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagNoACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagWithACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagWithACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagWithACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagWithACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeExpireCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeExpireCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeExpireCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeExpireCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeMoveCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeMoveCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeMoveCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeMoveCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeOnlineLastSeenStatus
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeOnlineLastSeenStatus:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeOnlineLastSeenStatus
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeOnlineLastSeenStatus$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeRenameCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeRenameCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeRenameCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeRenameCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeTagCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeTagCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeTagCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeTagCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestOIDCAuthenticationPingAll
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestOIDCAuthenticationPingAll:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestOIDCAuthenticationPingAll
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestOIDCAuthenticationPingAll$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestOIDCExpireNodesBasedOnTokenExpiry
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestOIDCExpireNodesBasedOnTokenExpiry:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestOIDCExpireNodesBasedOnTokenExpiry
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestOIDCExpireNodesBasedOnTokenExpiry$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByHostname
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestPingAllByHostname:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestPingAllByHostname
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByHostname$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByIP
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestPingAllByIP:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestPingAllByIP
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByIP$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPingAllByIPPublicDERP
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestPingAllByIPPublicDERP:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestPingAllByIPPublicDERP
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPingAllByIPPublicDERP$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestPreAuthKeyCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestPreAuthKeyCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommandReusableEphemeral
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestPreAuthKeyCommandReusableEphemeral:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestPreAuthKeyCommandReusableEphemeral
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommandReusableEphemeral$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestPreAuthKeyCommandWithoutExpiry
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestPreAuthKeyCommandWithoutExpiry:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestPreAuthKeyCommandWithoutExpiry
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestPreAuthKeyCommandWithoutExpiry$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestResolveMagicDNS
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestResolveMagicDNS:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestResolveMagicDNS
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestResolveMagicDNS$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHIsBlockedInACL
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSSHIsBlockedInACL:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSSHIsBlockedInACL
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHIsBlockedInACL$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHMultipleUsersAllToAll
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSSHMultipleUsersAllToAll:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSSHMultipleUsersAllToAll
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHMultipleUsersAllToAll$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHNoSSHConfigured
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSSHNoSSHConfigured:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSSHNoSSHConfigured
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHNoSSHConfigured$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHOneUserToAll
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSSHOneUserToAll:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSSHOneUserToAll
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHOneUserToAll$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHUserOnlyIsolation
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSSHUserOnlyIsolation:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSSHUserOnlyIsolation
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHUserOnlyIsolation$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSubnetRouteACL
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSubnetRouteACL:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSubnetRouteACL
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSubnetRouteACL$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestTaildrop
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestTaildrop:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestTaildrop
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestTaildrop$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestTailscaleNodesJoiningHeadcale
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestTailscaleNodesJoiningHeadcale:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestTailscaleNodesJoiningHeadcale
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestTailscaleNodesJoiningHeadcale$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -1,67 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestUserCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestUserCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestUserCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestUserCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"


@@ -0,0 +1,27 @@
name: Integration Test v2 - kradalby
on: [pull_request]
jobs:
integration-test-v2-kradalby:
runs-on: [self-hosted, linux, x64, nixos, docker]
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_v2_general


@@ -2,10 +2,6 @@ name: Tests
on: [push, pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test:
runs-on: ubuntu-latest
@@ -26,9 +22,7 @@ jobs:
integration_test/
config-example.yaml
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.any_changed == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- name: Run tests


@@ -1,18 +0,0 @@
name: update-flake-lock
on:
workflow_dispatch: # allows manual triggering
schedule:
- cron: "0 0 * * 0" # runs weekly on Sunday at 00:00
jobs:
lockfile:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main
- name: Update flake.lock
uses: DeterminateSystems/update-flake-lock@main
with:
pr-title: "Update flake.lock"

.gitignore vendored

@@ -1,7 +1,3 @@
ignored/
tailscale/
.vscode/
# Binaries for programs and plugins
*.exe
*.exe~
@@ -16,9 +12,8 @@ tailscale/
*.out
# Dependency directories (remove the comment below to include it)
vendor/
# vendor/
dist/
/headscale
config.json
config.yaml
@@ -31,17 +26,10 @@ derp.yaml
# Exclude Jetbrains Editors
.idea
test_output/
control_logs/
test_output/
# Nix build output
result
.direnv/
integration_test/etc/config.dump.yaml
# MkDocs
.cache
/site
__debug_bin


@@ -10,8 +10,6 @@ issues:
linters:
enable-all: true
disable:
- depguard
- exhaustivestruct
- revive
- lll
@@ -31,15 +29,6 @@ linters:
- execinquery
- exhaustruct
- nolintlint
- musttag # causes issues with imported libs
- depguard
# deprecated
- structcheck # replaced by unused
- ifshort # deprecated by the owner
- varcheck # replaced by unused
- nosnakecase # replaced by revive
- deadcode # replaced by unused
# We should strive to enable these:
- wrapcheck


@@ -1,28 +1,21 @@
---
before:
hooks:
- go mod tidy -compat=1.22
- go mod vendor
- go mod tidy -compat=1.19
release:
prerelease: auto
builds:
- id: headscale
main: ./cmd/headscale
- id: darwin-amd64
main: ./cmd/headscale/headscale.go
mod_timestamp: "{{ .CommitTimestamp }}"
env:
- CGO_ENABLED=0
targets:
- darwin_amd64
- darwin_arm64
- freebsd_amd64
- linux_386
- linux_amd64
- linux_arm64
- linux_arm_5
- linux_arm_6
- linux_arm_7
goos:
- darwin
goarch:
- amd64
flags:
- -mod=readonly
ldflags:
@@ -30,142 +23,60 @@ builds:
tags:
- ts2019
- id: darwin-arm64
main: ./cmd/headscale/headscale.go
mod_timestamp: "{{ .CommitTimestamp }}"
env:
- CGO_ENABLED=0
goos:
- darwin
goarch:
- arm64
flags:
- -mod=readonly
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
- id: linux-amd64
mod_timestamp: "{{ .CommitTimestamp }}"
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- amd64
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
- id: linux-arm64
mod_timestamp: "{{ .CommitTimestamp }}"
env:
- CGO_ENABLED=0
goos:
- linux
goarch:
- arm64
main: ./cmd/headscale/headscale.go
ldflags:
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
archives:
- id: golang-cross
name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
builds:
- darwin-amd64
- darwin-arm64
- linux-amd64
- linux-arm64
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
format: binary
source:
enabled: true
name_template: "{{ .ProjectName }}_{{ .Version }}"
format: tar.gz
files:
- "vendor/"
nfpms:
# Configure nFPM for .deb and .rpm releases
#
# See https://nfpm.goreleaser.com/configuration/
# and https://goreleaser.com/customization/nfpm/
#
# Useful tools for debugging .debs:
# List file contents: dpkg -c dist/headscale...deb
# Package metadata: dpkg --info dist/headscale....deb
#
- builds:
- headscale
package_name: headscale
priority: optional
vendor: headscale
maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
homepage: https://github.com/juanfont/headscale
license: BSD
bindir: /usr/bin
formats:
- deb
contents:
- src: ./config-example.yaml
dst: /etc/headscale/config.yaml
type: config|noreplace
file_info:
mode: 0644
- src: ./docs/packaging/headscale.systemd.service
dst: /usr/lib/systemd/system/headscale.service
- dst: /var/lib/headscale
type: dir
- dst: /var/run/headscale
type: dir
scripts:
postinstall: ./docs/packaging/postinstall.sh
postremove: ./docs/packaging/postremove.sh
kos:
- id: ghcr
repository: ghcr.io/juanfont/headscale
# bare tells KO to only use the repository
# for tagging and naming the container.
bare: true
base_image: gcr.io/distroless/base-debian12
build: headscale
main: ./cmd/headscale
env:
- CGO_ENABLED=0
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- latest
- "{{ .Tag }}"
- "{{ .Major }}.{{ .Minor }}.{{ .Patch }}"
- "{{ .Major }}.{{ .Minor }}"
- "{{ .Major }}"
- "sha-{{ .ShortCommit }}"
- "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}"
- id: dockerhub
build: headscale
base_image: gcr.io/distroless/base-debian12
repository: headscale/headscale
bare: true
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- latest
- "{{ .Tag }}"
- "{{ .Major }}.{{ .Minor }}.{{ .Patch }}"
- "{{ .Major }}.{{ .Minor }}"
- "{{ .Major }}"
- "sha-{{ .ShortCommit }}"
- "{{ if not .Prerelease }}stable{{ end }}"
- "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}"
- id: ghcr-debug
repository: ghcr.io/juanfont/headscale
bare: true
base_image: "debian:12"
build: headscale
main: ./cmd/headscale
env:
- CGO_ENABLED=0
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- latest
- "{{ .Tag }}-debug"
- "{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug"
- "{{ .Major }}.{{ .Minor }}-debug"
- "{{ .Major }}-debug"
- "sha-{{ .ShortCommit }}-debug"
- "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}-debug"
- id: dockerhub-debug
build: headscale
base_image: "debian:12"
repository: headscale/headscale
bare: true
platforms:
- linux/amd64
- linux/386
- linux/arm64
- linux/arm/v7
tags:
- latest
- "{{ .Tag }}-debug"
- "{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug"
- "{{ .Major }}.{{ .Minor }}-debug"
- "{{ .Major }}-debug"
- "sha-{{ .ShortCommit }}-debug"
- "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}-debug"
checksum:
name_template: "checksums.txt"
snapshot:


@@ -1 +0,0 @@
.github/workflows/test-integration-v2*


@@ -1,139 +1,10 @@
# CHANGELOG
## 0.23.0 (2023-XX-XX)
This release is mainly a code reorganisation and refactoring, significantly improving the maintainability of the codebase. This should allow us to improve further and make it easier for the maintainers to keep on top of the project.
**Please remember to always back up your database between versions**
#### Here is a short summary of the broad topics of changes:
Code has been organised into modules, reducing use of global variables/objects, isolating concerns and “putting the right things in the logical place”.
The new [policy](https://github.com/juanfont/headscale/tree/main/hscontrol/policy) and [mapper](https://github.com/juanfont/headscale/tree/main/hscontrol/mapper) packages, containing the ACL/Policy logic and the logic for creating the data served to clients (the network “map”), have been rewritten and improved. This change has allowed us to finish SSH support and add additional tests throughout the code to ensure correctness.
The [“poller”, or streaming logic](https://github.com/juanfont/headscale/blob/main/hscontrol/poll.go) has been rewritten. Instead of keeping track of the latest updates and checking at a fixed interval, it now uses Go channels, implemented in our new [notifier](https://github.com/juanfont/headscale/tree/main/hscontrol/notifier) package, which allows us to send updates to connected clients immediately. This should improve both performance and the latency before a client picks up an update.
Headscale now supports sending “delta” updates, thanks to the new mapper and poller logic, allowing us to only inform nodes about new nodes, changed nodes and removed nodes. Previously we sent the entire state of the network every time an update was due.
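The paragraphs above describe the notifier and delta updates only in prose. Below is a minimal, hypothetical Go sketch of what a channel-based notifier delivering delta updates could look like; it is not the actual `hscontrol/notifier` code, and the type names, fields and buffer size are assumptions made purely for illustration.

```go
// Minimal sketch of a channel-based notifier; names and fields are
// illustrative and do not mirror the real hscontrol/notifier package.
package notifier

import "sync"

// StateUpdate is a hypothetical "delta" message: only what changed.
type StateUpdate struct {
    ChangedNodeIDs []uint64
    RemovedNodeIDs []uint64
}

// Notifier fans updates out to every connected node's long-poll stream.
type Notifier struct {
    mu    sync.RWMutex
    nodes map[uint64]chan StateUpdate // keyed by node ID
}

func New() *Notifier {
    return &Notifier{nodes: make(map[uint64]chan StateUpdate)}
}

// AddNode registers a stream; the poller reads from the returned channel.
func (n *Notifier) AddNode(id uint64) <-chan StateUpdate {
    n.mu.Lock()
    defer n.mu.Unlock()
    ch := make(chan StateUpdate, 8) // small buffer so one slow client does not block others
    n.nodes[id] = ch
    return ch
}

// RemoveNode unregisters a stream when the client disconnects.
func (n *Notifier) RemoveNode(id uint64) {
    n.mu.Lock()
    defer n.mu.Unlock()
    if ch, ok := n.nodes[id]; ok {
        close(ch)
        delete(n.nodes, id)
    }
}

// NotifyAll pushes a delta to every connected stream immediately,
// instead of waiting for a fixed polling interval.
func (n *Notifier) NotifyAll(update StateUpdate) {
    n.mu.RLock()
    defer n.mu.RUnlock()
    for _, ch := range n.nodes {
        select {
        case ch <- update:
        default: // drop rather than block if a client's buffer is full
        }
    }
}
```

A per-node buffered channel combined with a non-blocking send is a common way to keep a single slow long-poll stream from stalling broadcasts to every other client.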
While we have a pretty good [test harness](https://github.com/search?q=repo%3Ajuanfont%2Fheadscale+path%3A_test.go&type=code) for validating our changes, we have rewritten over [10000 lines of code](https://github.com/juanfont/headscale/compare/b01f1f1867136d9b2d7b1392776eb363b482c525...main) and bugs are expected. We need help testing this release. In addition, while we think the performance should in general be better, there might be regressions in parts of the platform, particularly where we prioritised correctness over speed.
There are also several bugfixes that have been encountered and fixed as part of implementing these changes, particularly
after improving the test harness as part of adopting [#1460](https://github.com/juanfont/headscale/pull/1460).
### BREAKING
- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
- The latest supported client is 1.38
- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
- If no DERP is configured, the server will fail to start, this can be because it cannot load the DERPMap from file or url.
- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)
- Add a filepath entry to [`derp.server.private_key_path`](https://github.com/juanfont/headscale/blob/b35993981297e18393706b2c963d6db882bba6aa/config-example.yaml#L95)
### Changes
- Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644)
- Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484)
- SSH support [#1487](https://github.com/juanfont/headscale/pull/1487)
- State management has been improved [#1492](https://github.com/juanfont/headscale/pull/1492)
- Use error group handling to ensure tests actually pass [#1535](https://github.com/juanfont/headscale/pull/1535) based on [#1460](https://github.com/juanfont/headscale/pull/1460)
- Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480)
- Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
- Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
- Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
- Added the possibility to manually create a DERP-map entry which can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565)
- Change the structure of database configuration, see [config-example.yaml](./config-example.yaml) for the new structure. [#1700](https://github.com/juanfont/headscale/pull/1700)
- Old structure is now considered deprecated and will be removed in the future.
- Adds additional configuration for PostgreSQL for setting the maximum number of open connections, idle connections and the idle connection lifetime (see the sketch after this list).
- Add support for deleting api keys [#1702](https://github.com/juanfont/headscale/pull/1702)
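To make the new PostgreSQL tuning options listed above concrete, here is a small, hypothetical Go sketch using only the standard `database/sql` pool settings. The actual headscale configuration keys, and how they are wired into the database layer internally, may differ; treat the DSN, driver and values as placeholders.

```go
// Illustration of the three pool knobs mentioned above, using the
// standard database/sql API only; not headscale's actual wiring.
package main

import (
    "database/sql"
    "log"
    "time"

    _ "github.com/lib/pq" // assumed Postgres driver for this example
)

func main() {
    db, err := sql.Open("postgres",
        "host=localhost user=headscale dbname=headscale sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    defer db.Close()

    // The new configuration options map conceptually onto these knobs:
    db.SetMaxOpenConns(10)                 // max open connections
    db.SetMaxIdleConns(5)                  // max idle connections
    db.SetConnMaxIdleTime(5 * time.Minute) // idle connection lifetime
}
```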
## 0.22.3 (2023-05-12)
### Changes
- Added missing ca-certificates in Docker image [#1463](https://github.com/juanfont/headscale/pull/1463)
## 0.22.2 (2023-05-10)
### Changes
- Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382)
- Profiles are continuously generated in our integration tests.
- Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391)
- Improvements on Noise implementation [#1379](https://github.com/juanfont/headscale/pull/1379)
- Replace node filter logic, ensuring nodes with access can see each other [#1381](https://github.com/juanfont/headscale/pull/1381)
- Disable (or delete) both exit routes at the same time [#1428](https://github.com/juanfont/headscale/pull/1428)
- Ditch distroless for Docker image, create default socket dir in `/var/run/headscale` [#1450](https://github.com/juanfont/headscale/pull/1450)
## 0.22.1 (2023-04-20)
### Changes
- Fix issue where systemd could not bind to port 80 [#1365](https://github.com/juanfont/headscale/pull/1365)
## 0.22.0 (2023-04-20)
### Changes
- Add `.deb` packages to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Update and simplify the documentation to use new `.deb` packages [#1349](https://github.com/juanfont/headscale/pull/1349)
- Add 32-bit Arm platforms to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Fix longstanding bug that would prevent "\*" from working properly in ACLs (issue [#699](https://github.com/juanfont/headscale/issues/699)) [#1279](https://github.com/juanfont/headscale/pull/1279)
- Fix issue where IPv6 could not be used in, or while using ACLs (part of [#809](https://github.com/juanfont/headscale/issues/809)) [#1339](https://github.com/juanfont/headscale/pull/1339)
- Target Go 1.20 and Tailscale 1.38 for Headscale [#1323](https://github.com/juanfont/headscale/pull/1323)
## 0.21.0 (2023-03-20)
### Changes
- Adding "configtest" CLI command. [#1230](https://github.com/juanfont/headscale/pull/1230)
- Add documentation on connecting with iOS to `/apple` [#1261](https://github.com/juanfont/headscale/pull/1261)
- Update iOS compatibility and added documentation for iOS [#1264](https://github.com/juanfont/headscale/pull/1264)
- Allow to delete routes [#1244](https://github.com/juanfont/headscale/pull/1244)
## 0.20.0 (2023-02-03)
### Changes
- Fix wrong behaviour in exit nodes [#1159](https://github.com/juanfont/headscale/pull/1159)
- Align behaviour of `dns_config.restricted_nameservers` to tailscale [#1162](https://github.com/juanfont/headscale/pull/1162)
- Make OpenID Connect authenticated client expiry time configurable [#1191](https://github.com/juanfont/headscale/pull/1191)
- defaults to 180 days like Tailscale SaaS
- adds option to use the expiry time from the OpenID token for the node (see config-example.yaml)
- Set ControlTime in Map info sent to nodes [#1195](https://github.com/juanfont/headscale/pull/1195)
- Populate Tags field on Node updates sent [#1195](https://github.com/juanfont/headscale/pull/1195)
## 0.19.0 (2023-01-29)
### BREAKING
- Rename Namespace to User [#1144](https://github.com/juanfont/headscale/pull/1144)
- **BACKUP your database before upgrading**
- Command line flags previously taking `--namespace` or `-n` will now require `--user` or `-u`
## 0.18.0 (2023-01-14)
### Changes
- Reworked routing and added support for subnet router failover [#1024](https://github.com/juanfont/headscale/pull/1024)
- Added an OIDC AllowGroups Configuration options and authorization check [#1041](https://github.com/juanfont/headscale/pull/1041)
- Set `db_ssl` to false by default [#1052](https://github.com/juanfont/headscale/pull/1052)
- Fix duplicate nodes due to incorrect implementation of the protocol [#1058](https://github.com/juanfont/headscale/pull/1058)
- Report if a machine is online in CLI more accurately [#1062](https://github.com/juanfont/headscale/pull/1062)
- Added config option for custom DNS records [#1035](https://github.com/juanfont/headscale/pull/1035)
- Expire nodes based on OIDC token expiry [#1067](https://github.com/juanfont/headscale/pull/1067)
- Remove ephemeral nodes on logout [#1098](https://github.com/juanfont/headscale/pull/1098)
- Performance improvements in ACLs [#1129](https://github.com/juanfont/headscale/pull/1129)
- OIDC client secret can be passed via a file [#1127](https://github.com/juanfont/headscale/pull/1127)
## 0.17.1 (2022-12-05)
## 0.17.1 (2022-x-x)
### Changes
- Correct typo on macOS standalone profile link [#1028](https://github.com/juanfont/headscale/pull/1028)
- Update platform docs with Fast User Switching [#1016](https://github.com/juanfont/headscale/pull/1016)
## 0.17.0 (2022-11-26)

Dockerfile Normal file

@@ -0,0 +1,23 @@
# Builder image
FROM docker.io/golang:1.19-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale
COPY go.mod go.sum /go/src/headscale/
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale
# Production image
FROM gcr.io/distroless/base-debian11
COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC
EXPOSE 8080/tcp
CMD ["headscale"]
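The `-ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION"` line in this Dockerfile (and the matching `ldflags` entries in the goreleaser configuration earlier in this diff) uses the Go linker's `-X` flag to stamp a version string into the binary at build time. The sketch below shows the kind of package-level variable that flag targets; the exact declaration inside `cmd/headscale/cli` is an assumption here.

```go
// Sketch of the variable that "-X ...cli.Version=$VERSION" overwrites at
// link time; the real cmd/headscale/cli package may declare it differently.
package cli

import "fmt"

// Version defaults to "dev" for a plain `go build`, and is replaced by the
// linker when building with:
//   go build -ldflags "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v0.23.0"
var Version = "dev"

func PrintVersion() {
    fmt.Println("headscale", Version)
}
```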


@@ -1,8 +1,5 @@
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.
FROM docker.io/golang:1.22-bookworm AS build
# Builder image
FROM docker.io/golang:1.19-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale
@@ -12,21 +9,15 @@ RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN test -e /go/bin/headscale
# Debug image
FROM docker.io/golang:1.22-bookworm
FROM docker.io/golang:1.19.0-bullseye
COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC
RUN apt-get update \
&& apt-get install --no-install-recommends --yes less jq \
&& rm -rf /var/lib/apt/lists/* \
&& apt-get clean
RUN mkdir -p /var/run/headscale
# Need to reset the entrypoint or everything will run as a busybox script
ENTRYPOINT []
EXPOSE 8080/tcp

Dockerfile.tailscale Normal file

@@ -0,0 +1,19 @@
FROM ubuntu:latest
ARG TAILSCALE_VERSION=*
ARG TAILSCALE_CHANNEL=stable
RUN apt-get update \
&& apt-get install -y gnupg curl ssh \
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
&& apt-get update \
&& apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
&& rm -rf /var/lib/apt/lists/*
RUN adduser --shell=/bin/bash ssh-it-user
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN update-ca-certificates


@@ -1,11 +1,7 @@
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.
FROM golang:latest
RUN apt-get update \
&& apt-get install -y dnsutils git iptables ssh ca-certificates \
&& apt-get install -y ca-certificates dnsutils git iptables ssh \
&& rm -rf /var/lib/apt/lists/*
RUN useradd --shell=/bin/bash --create-home ssh-it-user
@@ -14,8 +10,15 @@ RUN git clone https://github.com/tailscale/tailscale.git
WORKDIR /go/tailscale
RUN git checkout main \
&& sh build_dist.sh tailscale.com/cmd/tailscale \
&& sh build_dist.sh tailscale.com/cmd/tailscaled \
&& cp tailscale /usr/local/bin/ \
&& cp tailscaled /usr/local/bin/
RUN git checkout main
RUN sh build_dist.sh tailscale.com/cmd/tailscale
RUN sh build_dist.sh tailscale.com/cmd/tailscaled
RUN cp tailscale /usr/local/bin/
RUN cp tailscaled /usr/local/bin/
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
RUN update-ca-certificates


@@ -10,6 +10,8 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
else
endif
TAGS = -tags ts2019
# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)
@@ -22,9 +24,31 @@ build:
dev: lint test build
test:
gotestsum -- -short -coverprofile=coverage.out ./...
@go test $(TAGS) -short -coverprofile=coverage.out ./...
test_integration:
test_integration: test_integration_cli test_integration_derp test_integration_oidc test_integration_v2_general
test_integration_cli:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...
test_integration_derp:
docker network rm $$(docker network ls --filter name=headscale --quiet) || true
docker network create headscale-test || true
docker run -t --rm \
--network headscale-test \
-v ~/.cache/hs-integration-go:/go \
-v $$PWD:$$PWD -w $$PWD \
-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...
test_integration_v2_general:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
@@ -32,7 +56,13 @@ test_integration:
-v $$PWD:$$PWD -w $$PWD/integration \
-v /var/run/docker.sock:/var/run/docker.sock \
golang:1 \
go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8
go test $(TAGS) -failfast ./... -timeout 120m -parallel 8
coverprofile_func:
go tool cover -func=coverage.out
coverprofile_html:
go tool cover -html=coverage.out
lint:
golangci-lint run --fix --timeout 10m
@@ -50,4 +80,11 @@ compress: build
generate:
rm -rf gen
buf generate proto
go run github.com/bufbuild/buf/cmd/buf generate proto
install-protobuf-plugins:
go install \
github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway \
github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 \
google.golang.org/protobuf/cmd/protoc-gen-go \
google.golang.org/grpc/cmd/protoc-gen-go-grpc

440
README.md
View File

@@ -32,18 +32,22 @@ organisation.
## Design goal
Headscale aims to implement a self-hosted, open source alternative to the Tailscale
control server.
Headscale's goal is to provide self-hosters and hobbyists with an open-source
server they can use for their projects and labs.
It implements a narrow scope, a single Tailnet, suitable for personal use or a small
open-source organisation.
`headscale` aims to implement a self-hosted, open source alternative to the Tailscale
control server. `headscale` has a narrower scope and an instance of `headscale`
implements a _single_ Tailnet, which is typically what a single organisation, or
home/personal setup would use.
## Supporting Headscale
`headscale` uses terms that map to Tailscale's control server; consult the
[glossary](./docs/glossary.md) for explanations.
## Support
If you like `headscale` and find it useful, there are sponsorship and donation
buttons available in the repo.
If you would like to sponsor features, bugs or prioritisation, reach out to
one of the maintainers.
## Features
- Full "base" support of Tailscale's features
@@ -71,39 +75,19 @@ buttons available in the repo.
| macOS | Yes (see `/apple` on your headscale for more information) |
| Windows | Yes [docs](./docs/windows-client.md) |
| Android | Yes [docs](./docs/android-client.md) |
| iOS | Yes [docs](./docs/iOS-client.md) |
| iOS | Not yet |
## Running headscale
**Please note that we do not support or encourage the use of reverse proxies
and containers to run Headscale.**
Please have a look at the [`documentation`](https://headscale.net/).
## Talks
- Fosdem 2023 (video): [Headscale: How we are using integration testing to reimplement Tailscale](https://fosdem.org/2023/schedule/event/goheadscale/)
- presented by Juan Font Alonso and Kristoffer Dalby
Please have a look at the documentation under [`docs/`](docs/).
## Disclaimer
1. This project is not associated with Tailscale Inc.
1. We have nothing to do with Tailscale, or Tailscale Inc.
2. The purpose of Headscale is maintaining a working, self-hosted Tailscale control panel.
## Contributing
Headscale is "Open Source, acknowledged contribution", this means that any
contribution will have to be discussed with the Maintainers before being submitted.
This model has been chosen to reduce the risk of burnout by limiting the
maintenance overhead of reviewing and validating third-party code.
Headscale is open to code contributions for bug fixes without discussion.
If you find mistakes in the documentation, please submit a fix to the documentation.
### Requirements
To contribute to headscale you need the latest version of [Go](https://golang.org)
and [Buf](https://buf.build) (Protobuf generator).
@@ -111,6 +95,8 @@ We recommend using [Nix](https://nixos.org/) to setup a development environment.
be done with `nix develop`, which will install the tools and give you a shell.
This guarantees that you will have the same dev env as `headscale` maintainers.
PRs and suggestions are welcome.
### Code style
To ensure we have some consistency with a growing number of contributions,
@@ -225,13 +211,6 @@ make build
<sub style="font-size:14px"><b>Nico</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/evenh>
<img src=https://avatars.githubusercontent.com/u/2701536?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Even Holthe/>
<br />
<sub style="font-size:14px"><b>Even Holthe</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/e-zk>
<img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/>
@@ -240,7 +219,7 @@ make build
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ImpostorKeanu>
<a href=https://github.com/arch4ngel>
<img src=https://avatars.githubusercontent.com/u/11574161?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Justin Angel/>
<br />
<sub style="font-size:14px"><b>Justin Angel</b></sub>
@@ -253,6 +232,13 @@ make build
<sub style="font-size:14px"><b>Alessandro (Ale) Segala</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/unreality>
<img src=https://avatars.githubusercontent.com/u/352522?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=unreality/>
<br />
<sub style="font-size:14px"><b>unreality</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ohdearaugustin>
<img src=https://avatars.githubusercontent.com/u/14001491?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ohdearaugustin/>
@@ -269,13 +255,6 @@ make build
<sub style="font-size:14px"><b>Moritz Poldrack</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Orhideous>
<img src=https://avatars.githubusercontent.com/u/2265184?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andriy Kushnir/>
<br />
<sub style="font-size:14px"><b>Andriy Kushnir</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/GrigoriyMikhalkin>
<img src=https://avatars.githubusercontent.com/u/3637857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=GrigoriyMikhalkin/>
@@ -283,13 +262,6 @@ make build
<sub style="font-size:14px"><b>GrigoriyMikhalkin</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/christian-heusel>
<img src=https://avatars.githubusercontent.com/u/26827864?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Christian Heusel/>
<br />
<sub style="font-size:14px"><b>Christian Heusel</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mike-lloyd03>
<img src=https://avatars.githubusercontent.com/u/49411532?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mike Lloyd/>
@@ -304,8 +276,6 @@ make build
<sub style="font-size:14px"><b>Anton Schubert</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Niek>
<img src=https://avatars.githubusercontent.com/u/213140?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Niek van der Maas/>
@@ -320,6 +290,8 @@ make build
<sub style="font-size:14px"><b>Eugen Biegler</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/617a7a>
<img src=https://avatars.githubusercontent.com/u/67651251?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azz/>
@@ -348,15 +320,6 @@ make build
<sub style="font-size:14px"><b>Laurent Marchaud</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/majst01>
<img src=https://avatars.githubusercontent.com/u/410110?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan Majer/>
<br />
<sub style="font-size:14px"><b>Stefan Majer</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/fdelucchijr>
<img src=https://avatars.githubusercontent.com/u/69133647?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Fernando De Lucchi/>
@@ -371,6 +334,8 @@ make build
<sub style="font-size:14px"><b>Orville Q. Song</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/hdhoang>
<img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hdhoang/>
@@ -392,8 +357,6 @@ make build
<sub style="font-size:14px"><b>Deon Thomas</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/madjam002>
<img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/>
@@ -401,13 +364,6 @@ make build
<sub style="font-size:14px"><b>Jamie Greeff</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jonathanspw>
<img src=https://avatars.githubusercontent.com/u/8390543?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan Wright/>
<br />
<sub style="font-size:14px"><b>Jonathan Wright</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ChibangLW>
<img src=https://avatars.githubusercontent.com/u/22293464?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ChibangLW/>
@@ -415,13 +371,6 @@ make build
<sub style="font-size:14px"><b>ChibangLW</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/majabojarska>
<img src=https://avatars.githubusercontent.com/u/33836570?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Maja Bojarska/>
<br />
<sub style="font-size:14px"><b>Maja Bojarska</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mevansam>
<img src=https://avatars.githubusercontent.com/u/403630?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mevan Samaratunga/>
@@ -429,6 +378,8 @@ make build
<sub style="font-size:14px"><b>Mevan Samaratunga</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/dragetd>
<img src=https://avatars.githubusercontent.com/u/3639577?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael G./>
@@ -436,8 +387,6 @@ make build
<sub style="font-size:14px"><b>Michael G.</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ptman>
<img src=https://avatars.githubusercontent.com/u/24669?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Paul Tötterman/>
@@ -453,24 +402,10 @@ make build
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/loprima-l>
<img src=https://avatars.githubusercontent.com/u/69201633?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=loprima-l/>
<a href=https://github.com/majst01>
<img src=https://avatars.githubusercontent.com/u/410110?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan Majer/>
<br />
<sub style="font-size:14px"><b>loprima-l</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/unreality>
<img src=https://avatars.githubusercontent.com/u/352522?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=unreality/>
<br />
<sub style="font-size:14px"><b>unreality</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/vsychov>
<img src=https://avatars.githubusercontent.com/u/2186303?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=MichaelKo/>
<br />
<sub style="font-size:14px"><b>MichaelKo</b></sub>
<sub style="font-size:14px"><b>Stefan Majer</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -480,15 +415,6 @@ make build
<sub style="font-size:14px"><b>kevinlin</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/QZAiXH>
<img src=https://avatars.githubusercontent.com/u/23068780?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Snack/>
<br />
<sub style="font-size:14px"><b>Snack</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/artemklevtsov>
<img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
@@ -496,6 +422,8 @@ make build
<sub style="font-size:14px"><b>Artem Klevtsov</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/cmars>
<img src=https://avatars.githubusercontent.com/u/23741?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Casey Marshall/>
@@ -503,34 +431,11 @@ make build
<sub style="font-size:14px"><b>Casey Marshall</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/dbevacqua>
<img src=https://avatars.githubusercontent.com/u/6534306?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dbevacqua/>
<br />
<sub style="font-size:14px"><b>dbevacqua</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/joshuataylor>
<img src=https://avatars.githubusercontent.com/u/225131?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Josh Taylor/>
<br />
<sub style="font-size:14px"><b>Josh Taylor</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/CNLHC>
<img src=https://avatars.githubusercontent.com/u/21005146?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=LIU HANCHENG/>
<img src=https://avatars.githubusercontent.com/u/21005146?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=LiuHanCheng/>
<br />
<sub style="font-size:14px"><b>LIU HANCHENG</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/motiejus>
<img src=https://avatars.githubusercontent.com/u/107720?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Motiejus Jakštys/>
<br />
<sub style="font-size:14px"><b>Motiejus Jakštys</b></sub>
<sub style="font-size:14px"><b>LiuHanCheng</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -561,15 +466,15 @@ make build
<sub style="font-size:14px"><b>Victor Freire</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/qzydustin>
<img src=https://avatars.githubusercontent.com/u/44362429?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zhenyu Qi/>
<br />
<sub style="font-size:14px"><b>Zhenyu Qi</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/lachy2849>
<img src=https://avatars.githubusercontent.com/u/98844035?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lachy2849/>
<br />
<sub style="font-size:14px"><b>lachy2849</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/t56k>
<img src=https://avatars.githubusercontent.com/u/12165422?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=thomas/>
@@ -577,20 +482,6 @@ make build
<sub style="font-size:14px"><b>thomas</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/puzpuzpuz>
<img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
<br />
<sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/linsomniac>
<img src=https://avatars.githubusercontent.com/u/466380?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sean Reifschneider/>
<br />
<sub style="font-size:14px"><b>Sean Reifschneider</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/aberoham>
<img src=https://avatars.githubusercontent.com/u/586805?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Abraham Ingersoll/>
@@ -599,21 +490,12 @@ make build
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/iFargle>
<img src=https://avatars.githubusercontent.com/u/124551390?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Albert Copeland/>
<a href=https://github.com/puzpuzpuz>
<img src=https://avatars.githubusercontent.com/u/37772591?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Andrei Pechkurov/>
<br />
<sub style="font-size:14px"><b>Albert Copeland</b></sub>
<sub style="font-size:14px"><b>Andrei Pechkurov</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/theryecatcher>
<img src=https://avatars.githubusercontent.com/u/16442416?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anoop Sundaresh/>
<br />
<sub style="font-size:14px"><b>Anoop Sundaresh</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/apognu>
<img src=https://avatars.githubusercontent.com/u/3017182?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antoine POPINEAU/>
@@ -621,13 +503,6 @@ make build
<sub style="font-size:14px"><b>Antoine POPINEAU</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/tony1661>
<img src=https://avatars.githubusercontent.com/u/5287266?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antonio Fernandez/>
<br />
<sub style="font-size:14px"><b>Antonio Fernandez</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/aofei>
<img src=https://avatars.githubusercontent.com/u/5037285?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aofei Sheng/>
@@ -635,6 +510,8 @@ make build
<sub style="font-size:14px"><b>Aofei Sheng</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/arnarg>
<img src=https://avatars.githubusercontent.com/u/1291396?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arnar/>
@@ -649,22 +526,6 @@ make build
<sub style="font-size:14px"><b>Arthur Woimbée</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/avirut>
<img src=https://avatars.githubusercontent.com/u/27095602?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Avirut Mehta/>
<br />
<sub style="font-size:14px"><b>Avirut Mehta</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/winterheart>
<img src=https://avatars.githubusercontent.com/u/81112?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Azamat H. Hackimov/>
<br />
<sub style="font-size:14px"><b>Azamat H. Hackimov</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/stensonb>
<img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/>
@@ -681,16 +542,9 @@ make build
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/kundel>
<img src=https://avatars.githubusercontent.com/u/10158899?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Darrell Kundel/>
<img src=https://avatars.githubusercontent.com/u/10158899?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kundel/>
<br />
<sub style="font-size:14px"><b>Darrell Kundel</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/fatih-acar>
<img src=https://avatars.githubusercontent.com/u/15028881?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=fatih-acar/>
<br />
<sub style="font-size:14px"><b>fatih-acar</b></sub>
<sub style="font-size:14px"><b>kundel</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -709,13 +563,6 @@ make build
<sub style="font-size:14px"><b>Felix Yan</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/gabe565>
<img src=https://avatars.githubusercontent.com/u/7717888?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Gabe Cook/>
<br />
<sub style="font-size:14px"><b>Gabe Cook</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/JJGadgets>
<img src=https://avatars.githubusercontent.com/u/5709019?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JJGadgets/>
@@ -723,20 +570,6 @@ make build
<sub style="font-size:14px"><b>JJGadgets</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/hrtkpf>
<img src=https://avatars.githubusercontent.com/u/42646788?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=hrtkpf/>
<br />
<sub style="font-size:14px"><b>hrtkpf</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jessebot>
<img src=https://avatars.githubusercontent.com/u/2389292?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JesseBot/>
<br />
<sub style="font-size:14px"><b>JesseBot</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jimt>
<img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
@@ -744,22 +577,6 @@ make build
<sub style="font-size:14px"><b>Jim Tittsler</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jsiebens>
<img src=https://avatars.githubusercontent.com/u/499769?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Johan Siebens/>
<br />
<sub style="font-size:14px"><b>Johan Siebens</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/johnae>
<img src=https://avatars.githubusercontent.com/u/28332?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=John Axel Eriksson/>
<br />
<sub style="font-size:14px"><b>John Axel Eriksson</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ShadowJonathan>
<img src=https://avatars.githubusercontent.com/u/22740616?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jonathan de Jong/>
@@ -767,57 +584,6 @@ make build
<sub style="font-size:14px"><b>Jonathan de Jong</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/JulienFloris>
<img src=https://avatars.githubusercontent.com/u/20380255?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Julien Zweverink/>
<br />
<sub style="font-size:14px"><b>Julien Zweverink</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/win-t>
<img src=https://avatars.githubusercontent.com/u/1589120?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kurnia D Win/>
<br />
<sub style="font-size:14px"><b>Kurnia D Win</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Lucalux>
<img src=https://avatars.githubusercontent.com/u/70356955?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Lucalux/>
<br />
<sub style="font-size:14px"><b>Lucalux</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/foxtrot>
<img src=https://avatars.githubusercontent.com/u/4153572?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Marc/>
<br />
<sub style="font-size:14px"><b>Marc</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mhameed>
<img src=https://avatars.githubusercontent.com/u/447017?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mesar Hameed/>
<br />
<sub style="font-size:14px"><b>Mesar Hameed</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/mikejsavage>
<img src=https://avatars.githubusercontent.com/u/579299?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael Savage/>
<br />
<sub style="font-size:14px"><b>Michael Savage</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/pkrivanec>
<img src=https://avatars.githubusercontent.com/u/25530641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Philipp Krivanec/>
<br />
<sub style="font-size:14px"><b>Philipp Krivanec</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/piec>
<img src=https://avatars.githubusercontent.com/u/781471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pierre Carru/>
@@ -825,15 +591,6 @@ make build
<sub style="font-size:14px"><b>Pierre Carru</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/donran>
<img src=https://avatars.githubusercontent.com/u/4838348?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pontus N/>
<br />
<sub style="font-size:14px"><b>Pontus N</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/nnsee>
<img src=https://avatars.githubusercontent.com/u/36747857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Rasmus Moorats/>
@@ -841,6 +598,8 @@ make build
<sub style="font-size:14px"><b>Rasmus Moorats</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/rcursaru>
<img src=https://avatars.githubusercontent.com/u/16259641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=rcursaru/>
@@ -862,13 +621,6 @@ make build
<sub style="font-size:14px"><b>Ryan Fowler</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/muzy>
<img src=https://avatars.githubusercontent.com/u/321723?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Sebastian/>
<br />
<sub style="font-size:14px"><b>Sebastian</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/shaananc>
<img src=https://avatars.githubusercontent.com/u/2287839?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Shaanan Cohney/>
@@ -876,15 +628,6 @@ make build
<sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/6ixfalls>
<img src=https://avatars.githubusercontent.com/u/23470032?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Six/>
<br />
<sub style="font-size:14px"><b>Six</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/stefanvanburen>
<img src=https://avatars.githubusercontent.com/u/622527?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan VanBuren/>
@@ -899,6 +642,8 @@ make build
<sub style="font-size:14px"><b>sophware</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/m-tanner-dev0>
<img src=https://avatars.githubusercontent.com/u/97977342?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tanner/>
@@ -920,8 +665,6 @@ make build
<sub style="font-size:14px"><b>The Gitter Badger</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/tianon>
<img src=https://avatars.githubusercontent.com/u/161631?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tianon Gravi/>
@@ -943,29 +686,22 @@ make build
<sub style="font-size:14px"><b>Tjerk Woudsma</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/y0ngb1n>
<img src=https://avatars.githubusercontent.com/u/25719408?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=杨斌 Aben/>
<img src=https://avatars.githubusercontent.com/u/25719408?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yang Bin/>
<br />
<sub style="font-size:14px"><b>杨斌 Aben</b></sub>
<sub style="font-size:14px"><b>Yang Bin</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/sleepymole>
<a href=https://github.com/gozssky>
<img src=https://avatars.githubusercontent.com/u/17199941?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yujie Xia/>
<br />
<sub style="font-size:14px"><b>Yujie Xia</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/newellz2>
<img src=https://avatars.githubusercontent.com/u/52436542?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zachary Newell/>
<br />
<sub style="font-size:14px"><b>Zachary Newell</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/zekker6>
<img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/>
@@ -987,13 +723,6 @@ make build
<sub style="font-size:14px"><b>Ziyuan Han</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/caelansar>
<img src=https://avatars.githubusercontent.com/u/31852257?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=caelansar/>
<br />
<sub style="font-size:14px"><b>caelansar</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/derelm>
<img src=https://avatars.githubusercontent.com/u/465155?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=derelm/>
@@ -1001,13 +730,6 @@ make build
<sub style="font-size:14px"><b>derelm</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/dnaq>
<img src=https://avatars.githubusercontent.com/u/1299717?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dnaq/>
<br />
<sub style="font-size:14px"><b>dnaq</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -1024,13 +746,6 @@ make build
<sub style="font-size:14px"><b>ignoramous</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/jimyag>
<img src=https://avatars.githubusercontent.com/u/69233189?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=jimyag/>
<br />
<sub style="font-size:14px"><b>jimyag</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/magichuihui>
<img src=https://avatars.githubusercontent.com/u/10866198?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=suhelen/>
@@ -1045,15 +760,6 @@ make build
<sub style="font-size:14px"><b>sharkonet</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/ma6174>
<img src=https://avatars.githubusercontent.com/u/1449133?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ma6174/>
<br />
<sub style="font-size:14px"><b>ma6174</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/manju-rn>
<img src=https://avatars.githubusercontent.com/u/26291847?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=manju-rn/>
@@ -1061,20 +767,15 @@ make build
<sub style="font-size:14px"><b>manju-rn</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/nicholas-yap>
<img src=https://avatars.githubusercontent.com/u/38109533?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=nicholas-yap/>
<br />
<sub style="font-size:14px"><b>nicholas-yap</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/pernila>
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/>
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=pernila/>
<br />
<sub style="font-size:14px"><b>Tommi Pernila</b></sub>
<sub style="font-size:14px"><b>pernila</b></sub>
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/phpmalik>
<img src=https://avatars.githubusercontent.com/u/26834645?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=phpmalik/>
@@ -1084,9 +785,9 @@ make build
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/Wakeful-Cloud>
<img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful Cloud/>
<img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful-Cloud/>
<br />
<sub style="font-size:14px"><b>Wakeful Cloud</b></sub>
<sub style="font-size:14px"><b>Wakeful-Cloud</b></sub>
</a>
</td>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
@@ -1097,13 +798,4 @@ make build
</a>
</td>
</tr>
<tr>
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
<a href=https://github.com/atorregrosa-smd>
<img src=https://avatars.githubusercontent.com/u/78434679?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Àlex Torregrosa/>
<br />
<sub style="font-size:14px"><b>Àlex Torregrosa</b></sub>
</a>
</td>
</tr>
</table>

699
acls.go Normal file
View File

@@ -0,0 +1,699 @@
package headscale
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/netip"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/rs/zerolog/log"
"github.com/tailscale/hujson"
"gopkg.in/yaml.v3"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
)
const (
errEmptyPolicy = Error("empty policy")
errInvalidAction = Error("invalid action")
errInvalidGroup = Error("invalid group")
errInvalidTag = Error("invalid tag")
errInvalidPortFormat = Error("invalid port format")
errWildcardIsNeeded = Error("wildcard as port is required for the protocol")
)
const (
Base8 = 8
Base10 = 10
BitSize16 = 16
BitSize32 = 32
BitSize64 = 64
portRangeBegin = 0
portRangeEnd = 65535
expectedTokenItems = 2
)
// For some reason golang.org/x/net/internal/iana is an internal package.
const (
protocolICMP = 1 // Internet Control Message
protocolIGMP = 2 // Internet Group Management
protocolIPv4 = 4 // IPv4 encapsulation
protocolTCP = 6 // Transmission Control
protocolEGP = 8 // Exterior Gateway Protocol
protocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
protocolUDP = 17 // User Datagram
protocolGRE = 47 // Generic Routing Encapsulation
protocolESP = 50 // Encap Security Payload
protocolAH = 51 // Authentication Header
protocolIPv6ICMP = 58 // ICMP for IPv6
protocolSCTP = 132 // Stream Control Transmission Protocol
ProtocolFC = 133 // Fibre Channel
)
var featureEnableSSH = envknob.RegisterBool("HEADSCALE_EXPERIMENTAL_FEATURE_SSH")
// LoadACLPolicy loads the ACL policy from the specified path and generates the ACL rules.
func (h *Headscale) LoadACLPolicy(path string) error {
log.Debug().
Str("func", "LoadACLPolicy").
Str("path", path).
Msg("Loading ACL policy from path")
policyFile, err := os.Open(path)
if err != nil {
return err
}
defer policyFile.Close()
var policy ACLPolicy
policyBytes, err := io.ReadAll(policyFile)
if err != nil {
return err
}
switch filepath.Ext(path) {
case ".yml", ".yaml":
log.Debug().
Str("path", path).
Bytes("file", policyBytes).
Msg("Loading ACLs from YAML")
err := yaml.Unmarshal(policyBytes, &policy)
if err != nil {
return err
}
log.Trace().
Interface("policy", policy).
Msg("Loaded policy from YAML")
default:
ast, err := hujson.Parse(policyBytes)
if err != nil {
return err
}
ast.Standardize()
policyBytes = ast.Pack()
err = json.Unmarshal(policyBytes, &policy)
if err != nil {
return err
}
}
if policy.IsZero() {
return errEmptyPolicy
}
h.aclPolicy = &policy
return h.UpdateACLRules()
}
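To make the HuJSON branch above concrete, here is a small hedged sketch (not part of acls.go; the policy body, struct and key names are invented for illustration) showing how a commented, trailing-comma HuJSON policy is standardized and unmarshalled the same way LoadACLPolicy does:

package main

import (
    "encoding/json"
    "fmt"

    "github.com/tailscale/hujson"
)

// policyLite mirrors only the fields needed for this sketch; the real
// ACLPolicy struct in headscale has more fields.
type policyLite struct {
    ACLs []struct {
        Action       string   `json:"action"`
        Sources      []string `json:"src"`
        Destinations []string `json:"dst"`
    } `json:"acls"`
}

func main() {
    // HuJSON allows comments and trailing commas, unlike plain JSON.
    policyBytes := []byte(`{
        // allow every node to reach port 22 on every node
        "acls": [
            {"action": "accept", "src": ["*"], "dst": ["*:22"],},
        ],
    }`)

    ast, err := hujson.Parse(policyBytes)
    if err != nil {
        panic(err)
    }
    ast.Standardize()        // strip comments and trailing commas
    policyBytes = ast.Pack() // plain JSON bytes again

    var policy policyLite
    if err := json.Unmarshal(policyBytes, &policy); err != nil {
        panic(err)
    }
    fmt.Println(policy.ACLs[0].Action, policy.ACLs[0].Destinations) // accept [*:22]
}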
func (h *Headscale) UpdateACLRules() error {
machines, err := h.ListMachines()
if err != nil {
return err
}
if h.aclPolicy == nil {
return errEmptyPolicy
}
rules, err := generateACLRules(machines, *h.aclPolicy, h.cfg.OIDC.StripEmaildomain)
if err != nil {
return err
}
log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
h.aclRules = rules
if featureEnableSSH() {
sshRules, err := h.generateSSHRules()
if err != nil {
return err
}
log.Trace().Interface("SSH", sshRules).Msg("SSH rules generated")
if h.sshPolicy == nil {
h.sshPolicy = &tailcfg.SSHPolicy{}
}
h.sshPolicy.Rules = sshRules
} else if h.aclPolicy != nil && len(h.aclPolicy.SSHs) > 0 {
log.Info().Msg("SSH ACLs has been defined, but HEADSCALE_EXPERIMENTAL_FEATURE_SSH is not enabled, this is a unstable feature, check docs before activating")
}
return nil
}
func generateACLRules(machines []Machine, aclPolicy ACLPolicy, stripEmaildomain bool) ([]tailcfg.FilterRule, error) {
rules := []tailcfg.FilterRule{}
for index, acl := range aclPolicy.ACLs {
if acl.Action != "accept" {
return nil, errInvalidAction
}
srcIPs := []string{}
for innerIndex, src := range acl.Sources {
srcs, err := generateACLPolicySrcIP(machines, aclPolicy, src, stripEmaildomain)
if err != nil {
log.Error().
Msgf("Error parsing ACL %d, Source %d", index, innerIndex)
return nil, err
}
srcIPs = append(srcIPs, srcs...)
}
protocols, needsWildcard, err := parseProtocol(acl.Protocol)
if err != nil {
log.Error().
Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol)
return nil, err
}
destPorts := []tailcfg.NetPortRange{}
for innerIndex, dest := range acl.Destinations {
dests, err := generateACLPolicyDest(
machines,
aclPolicy,
dest,
needsWildcard,
stripEmaildomain,
)
if err != nil {
log.Error().
Msgf("Error parsing ACL %d, Destination %d", index, innerIndex)
return nil, err
}
destPorts = append(destPorts, dests...)
}
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPs,
DstPorts: destPorts,
IPProto: protocols,
})
}
return rules, nil
}
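For intuition about the output shape, here is a hedged standalone sketch (not from the repository; the CIDR and port are invented) of roughly what a single accepted ACL entry with source `*` and destination `192.168.1.0/24:22` expands to, using the tailcfg types referenced above:

package main

import (
    "fmt"

    "tailscale.com/tailcfg"
)

func main() {
    // Roughly the FilterRule produced for
    // {"action": "accept", "src": ["*"], "dst": ["192.168.1.0/24:22"]}.
    rule := tailcfg.FilterRule{
        SrcIPs: []string{"*"},
        DstPorts: []tailcfg.NetPortRange{
            {IP: "192.168.1.0/24", Ports: tailcfg.PortRange{First: 22, Last: 22}},
        },
        // IPProto is left unset when the ACL omits "proto", so the default
        // protocol handling applies instead of an explicit protocol list.
    }
    fmt.Printf("%+v\n", rule)
}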
func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
rules := []*tailcfg.SSHRule{}
if h.aclPolicy == nil {
return nil, errEmptyPolicy
}
machines, err := h.ListMachines()
if err != nil {
return nil, err
}
acceptAction := tailcfg.SSHAction{
Message: "",
Reject: false,
Accept: true,
SessionDuration: 0,
AllowAgentForwarding: false,
HoldAndDelegate: "",
AllowLocalPortForwarding: true,
}
rejectAction := tailcfg.SSHAction{
Message: "",
Reject: true,
Accept: false,
SessionDuration: 0,
AllowAgentForwarding: false,
HoldAndDelegate: "",
AllowLocalPortForwarding: false,
}
for index, sshACL := range h.aclPolicy.SSHs {
action := rejectAction
switch sshACL.Action {
case "accept":
action = acceptAction
case "check":
checkAction, err := sshCheckAction(sshACL.CheckPeriod)
if err != nil {
log.Error().
Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod)
} else {
action = *checkAction
}
default:
log.Error().
Msgf("Error parsing SSH %d, unknown action '%s'", index, sshACL.Action)
return nil, errInvalidAction
}
principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
for innerIndex, rawSrc := range sshACL.Sources {
expandedSrcs, err := expandAlias(
machines,
*h.aclPolicy,
rawSrc,
h.cfg.OIDC.StripEmaildomain,
)
if err != nil {
log.Error().
Msgf("Error parsing SSH %d, Source %d", index, innerIndex)
return nil, err
}
for _, expandedSrc := range expandedSrcs {
principals = append(principals, &tailcfg.SSHPrincipal{
NodeIP: expandedSrc,
})
}
}
userMap := make(map[string]string, len(sshACL.Users))
for _, user := range sshACL.Users {
userMap[user] = "="
}
rules = append(rules, &tailcfg.SSHRule{
RuleExpires: nil,
Principals: principals,
SSHUsers: userMap,
Action: &action,
})
}
return rules, nil
}
func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
sessionLength, err := time.ParseDuration(duration)
if err != nil {
return nil, err
}
return &tailcfg.SSHAction{
Message: "",
Reject: false,
Accept: true,
SessionDuration: sessionLength,
AllowAgentForwarding: false,
HoldAndDelegate: "",
AllowLocalPortForwarding: true,
}, nil
}
func generateACLPolicySrcIP(
machines []Machine,
aclPolicy ACLPolicy,
src string,
stripEmaildomain bool,
) ([]string, error) {
return expandAlias(machines, aclPolicy, src, stripEmaildomain)
}
func generateACLPolicyDest(
machines []Machine,
aclPolicy ACLPolicy,
dest string,
needsWildcard bool,
stripEmaildomain bool,
) ([]tailcfg.NetPortRange, error) {
tokens := strings.Split(dest, ":")
if len(tokens) < expectedTokenItems || len(tokens) > 3 {
return nil, errInvalidPortFormat
}
var alias string
// Here we can have entries like:
// git-server:*
// 192.168.1.0/24:22
// tag:montreal-webserver:80,443
// tag:api-server:443
// example-host-1:*
if len(tokens) == expectedTokenItems {
alias = tokens[0]
} else {
alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
}
expanded, err := expandAlias(
machines,
aclPolicy,
alias,
stripEmaildomain,
)
if err != nil {
return nil, err
}
ports, err := expandPorts(tokens[len(tokens)-1], needsWildcard)
if err != nil {
return nil, err
}
dests := []tailcfg.NetPortRange{}
for _, d := range expanded {
for _, p := range *ports {
pr := tailcfg.NetPortRange{
IP: d,
Ports: p,
}
dests = append(dests, pr)
}
}
return dests, nil
}
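A quick hedged sketch (standalone helper, names invented) of the token handling above: two-token destinations keep the first token as the alias, while three-token destinations such as tags re-join the first two tokens before the port part is expanded.

package main

import (
    "fmt"
    "strings"
)

// splitDest mimics the alias/port split done by generateACLPolicyDest,
// without the validation around token counts.
func splitDest(dest string) (alias, ports string) {
    tokens := strings.Split(dest, ":")
    if len(tokens) == 2 {
        return tokens[0], tokens[1]
    }
    return tokens[0] + ":" + tokens[1], tokens[2]
}

func main() {
    fmt.Println(splitDest("git-server:*"))                  // git-server *
    fmt.Println(splitDest("tag:montreal-webserver:80,443")) // tag:montreal-webserver 80,443
}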
// parseProtocol reads the proto field of the ACL and generates a list of
// protocols that will be allowed, following the IANA IP protocol number
// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
//
// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
// as per Tailscale behaviour (see tailcfg.FilterRule).
//
// Also returns a boolean indicating if the protocol
// requires all the destinations to use wildcard as port number (only TCP,
// UDP and SCTP support specifying ports).
func parseProtocol(protocol string) ([]int, bool, error) {
switch protocol {
case "":
return nil, false, nil
case "igmp":
return []int{protocolIGMP}, true, nil
case "ipv4", "ip-in-ip":
return []int{protocolIPv4}, true, nil
case "tcp":
return []int{protocolTCP}, false, nil
case "egp":
return []int{protocolEGP}, true, nil
case "igp":
return []int{protocolIGP}, true, nil
case "udp":
return []int{protocolUDP}, false, nil
case "gre":
return []int{protocolGRE}, true, nil
case "esp":
return []int{protocolESP}, true, nil
case "ah":
return []int{protocolAH}, true, nil
case "sctp":
return []int{protocolSCTP}, false, nil
case "icmp":
return []int{protocolICMP, protocolIPv6ICMP}, true, nil
default:
protocolNumber, err := strconv.Atoi(protocol)
if err != nil {
return nil, false, err
}
needsWildcard := protocolNumber != protocolTCP &&
protocolNumber != protocolUDP &&
protocolNumber != protocolSCTP
return []int{protocolNumber}, needsWildcard, nil
}
}
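To make the wildcard flag above concrete, here is a hypothetical in-package test sketch (not part of the repository, assuming it would live in a `_test.go` file alongside this code) exercising a few of the switch cases:

package headscale

import "testing"

// TestParseProtocolSketch illustrates the contract of parseProtocol: a list
// of IANA protocol numbers plus a flag saying whether destinations must use
// "*" as their port.
func TestParseProtocolSketch(t *testing.T) {
    // Empty proto field: no explicit protocols, ports may be specified.
    protos, needsWildcard, err := parseProtocol("")
    if err != nil || protos != nil || needsWildcard {
        t.Fatalf("empty proto: got %v %v %v", protos, needsWildcard, err)
    }

    // "tcp" maps to protocol 6 and allows destination ports.
    protos, needsWildcard, _ = parseProtocol("tcp")
    if len(protos) != 1 || protos[0] != protocolTCP || needsWildcard {
        t.Fatalf("tcp: got %v %v", protos, needsWildcard)
    }

    // "icmp" covers both ICMPv4 and ICMPv6 and requires "*" as the port.
    protos, needsWildcard, _ = parseProtocol("icmp")
    if len(protos) != 2 || !needsWildcard {
        t.Fatalf("icmp: got %v %v", protos, needsWildcard)
    }
}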
// expandAlias expands an alias, which can be:
// - a namespace
// - a group
// - a tag
// - a host, an IP address or a CIDR
// and transforms it into a list of IP addresses.
func expandAlias(
machines []Machine,
aclPolicy ACLPolicy,
alias string,
stripEmailDomain bool,
) ([]string, error) {
ips := []string{}
if alias == "*" {
return []string{"*"}, nil
}
log.Debug().
Str("alias", alias).
Msg("Expanding")
if strings.HasPrefix(alias, "group:") {
namespaces, err := expandGroup(aclPolicy, alias, stripEmailDomain)
if err != nil {
return ips, err
}
for _, n := range namespaces {
nodes := filterMachinesByNamespace(machines, n)
for _, node := range nodes {
ips = append(ips, node.IPAddresses.ToStringSlice()...)
}
}
return ips, nil
}
if strings.HasPrefix(alias, "tag:") {
// check for forced tags
for _, machine := range machines {
if contains(machine.ForcedTags, alias) {
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
}
}
// find tag owners
owners, err := expandTagOwners(aclPolicy, alias, stripEmailDomain)
if err != nil {
if errors.Is(err, errInvalidTag) {
if len(ips) == 0 {
return ips, fmt.Errorf(
"%w. %v isn't owned by a TagOwner and no forced tags are defined",
errInvalidTag,
alias,
)
}
return ips, nil
} else {
return ips, err
}
}
// filter out machines per tag owner
for _, namespace := range owners {
machines := filterMachinesByNamespace(machines, namespace)
for _, machine := range machines {
hi := machine.GetHostInfo()
if contains(hi.RequestTags, alias) {
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
}
}
}
return ips, nil
}
// if alias is a namespace
nodes := filterMachinesByNamespace(machines, alias)
nodes = excludeCorrectlyTaggedNodes(aclPolicy, nodes, alias, stripEmailDomain)
for _, n := range nodes {
ips = append(ips, n.IPAddresses.ToStringSlice()...)
}
if len(ips) > 0 {
return ips, nil
}
// if alias is a host
if h, ok := aclPolicy.Hosts[alias]; ok {
return []string{h.String()}, nil
}
// if alias is an IP
ip, err := netip.ParseAddr(alias)
if err == nil {
return []string{ip.String()}, nil
}
// if alias is a CIDR
cidr, err := netip.ParsePrefix(alias)
if err == nil {
return []string{cidr.String()}, nil
}
log.Warn().Msgf("No IPs found with the alias %v", alias)
return ips, nil
}
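The literal fallbacks at the end of expandAlias can be illustrated with a hypothetical in-package test sketch (not from the repository, assumed to sit in a `_test.go` file), using an empty machine list and an empty policy so that only the wildcard, IP and CIDR branches are hit:

package headscale

import "testing"

// TestExpandAliasLiteralsSketch: with no machines and no matching host,
// group or tag, expandAlias falls through to the literal IP/CIDR branches.
func TestExpandAliasLiteralsSketch(t *testing.T) {
    // "*" is passed through untouched.
    ips, err := expandAlias(nil, ACLPolicy{}, "*", false)
    if err != nil || len(ips) != 1 || ips[0] != "*" {
        t.Fatalf("wildcard: got %v, %v", ips, err)
    }

    // A plain IP address is returned as-is.
    ips, err = expandAlias([]Machine{}, ACLPolicy{}, "10.0.0.1", false)
    if err != nil || len(ips) != 1 || ips[0] != "10.0.0.1" {
        t.Fatalf("literal IP: got %v, %v", ips, err)
    }

    // A CIDR is returned as-is.
    ips, err = expandAlias([]Machine{}, ACLPolicy{}, "192.168.1.0/24", false)
    if err != nil || len(ips) != 1 || ips[0] != "192.168.1.0/24" {
        t.Fatalf("CIDR: got %v, %v", ips, err)
    }
}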
// excludeCorrectlyTaggedNodes will remove from the list of input nodes the ones
// that are correctly tagged, since they should not be listed as being in the namespace.
// We assume in this function that we only have nodes from one namespace.
func excludeCorrectlyTaggedNodes(
aclPolicy ACLPolicy,
nodes []Machine,
namespace string,
stripEmailDomain bool,
) []Machine {
out := []Machine{}
tags := []string{}
for tag := range aclPolicy.TagOwners {
// look up the owners of this tag and treat it as belonging to the
// namespace only when the namespace is among those owners
owners, _ := expandTagOwners(aclPolicy, tag, stripEmailDomain)
if contains(owners, namespace) {
tags = append(tags, tag)
}
}
// for each machine if tag is in tags list, don't append it.
for _, machine := range nodes {
hi := machine.GetHostInfo()
found := false
for _, t := range hi.RequestTags {
if contains(tags, t) {
found = true
break
}
}
if len(machine.ForcedTags) > 0 {
found = true
}
if !found {
out = append(out, machine)
}
}
return out
}
func expandPorts(portsStr string, needsWildcard bool) (*[]tailcfg.PortRange, error) {
if portsStr == "*" {
return &[]tailcfg.PortRange{
{First: portRangeBegin, Last: portRangeEnd},
}, nil
}
if needsWildcard {
return nil, errWildcardIsNeeded
}
ports := []tailcfg.PortRange{}
for _, portStr := range strings.Split(portsStr, ",") {
rang := strings.Split(portStr, "-")
switch len(rang) {
case 1:
port, err := strconv.ParseUint(rang[0], Base10, BitSize16)
if err != nil {
return nil, err
}
ports = append(ports, tailcfg.PortRange{
First: uint16(port),
Last: uint16(port),
})
case expectedTokenItems:
start, err := strconv.ParseUint(rang[0], Base10, BitSize16)
if err != nil {
return nil, err
}
last, err := strconv.ParseUint(rang[1], Base10, BitSize16)
if err != nil {
return nil, err
}
ports = append(ports, tailcfg.PortRange{
First: uint16(start),
Last: uint16(last),
})
default:
return nil, errInvalidPortFormat
}
}
return &ports, nil
}
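A hypothetical in-package test sketch (not from the repository, assumed to sit in a `_test.go` file) of the port grammar accepted above: `*` for the whole range, comma-separated single ports or `low-high` ranges otherwise, and a rejection of explicit ports when the protocol requires a wildcard:

package headscale

import "testing"

func TestExpandPortsSketch(t *testing.T) {
    // Single ports and ranges can be mixed in one comma-separated list.
    ports, err := expandPorts("80,443,8000-8080", false)
    if err != nil || len(*ports) != 3 {
        t.Fatalf("got %v, %v", ports, err)
    }
    if (*ports)[2].First != 8000 || (*ports)[2].Last != 8080 {
        t.Fatalf("range entry: got %+v", (*ports)[2])
    }

    // Protocols that need a wildcard (e.g. icmp) reject explicit ports.
    if _, err := expandPorts("22", true); err == nil {
        t.Fatal("expected errWildcardIsNeeded")
    }

    // "*" expands to the full 0-65535 range.
    ports, _ = expandPorts("*", true)
    if (*ports)[0].First != 0 || (*ports)[0].Last != 65535 {
        t.Fatalf("wildcard: got %+v", (*ports)[0])
    }
}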
func filterMachinesByNamespace(machines []Machine, namespace string) []Machine {
out := []Machine{}
for _, machine := range machines {
if machine.Namespace.Name == namespace {
out = append(out, machine)
}
}
return out
}
// expandTagOwners will return a list of namespaces. An owner can be either a namespace or a group;
// a group cannot be composed of groups.
func expandTagOwners(
aclPolicy ACLPolicy,
tag string,
stripEmailDomain bool,
) ([]string, error) {
var owners []string
ows, ok := aclPolicy.TagOwners[tag]
if !ok {
return []string{}, fmt.Errorf(
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
errInvalidTag,
tag,
)
}
for _, owner := range ows {
if strings.HasPrefix(owner, "group:") {
gs, err := expandGroup(aclPolicy, owner, stripEmailDomain)
if err != nil {
return []string{}, err
}
owners = append(owners, gs...)
} else {
owners = append(owners, owner)
}
}
return owners, nil
}
// expandGroup will return the list of namespaces inside the group
// after some validation.
func expandGroup(
aclPolicy ACLPolicy,
group string,
stripEmailDomain bool,
) ([]string, error) {
outGroups := []string{}
aclGroups, ok := aclPolicy.Groups[group]
if !ok {
return []string{}, fmt.Errorf(
"group %v isn't registered. %w",
group,
errInvalidGroup,
)
}
for _, group := range aclGroups {
if strings.HasPrefix(group, "group:") {
return []string{}, fmt.Errorf(
"%w. A group cannot be composed of groups. https://tailscale.com/kb/1018/acls/#groups",
errInvalidGroup,
)
}
grp, err := NormalizeToFQDNRules(group, stripEmailDomain)
if err != nil {
return []string{}, fmt.Errorf(
"failed to normalize group %q, err: %w",
group,
errInvalidGroup,
)
}
outGroups = append(outGroups, grp)
}
return outGroups, nil
}
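A hypothetical in-package test sketch (not from the repository, assumed to sit in a `_test.go` file) of expandGroup; it also assumes NormalizeToFQDNRules leaves plain lowercase names such as these unchanged:

package headscale

import "testing"

func TestExpandGroupSketch(t *testing.T) {
    pol := ACLPolicy{
        Groups: Groups{"group:example": {"user1", "user2"}},
    }

    // A registered group expands to its member namespaces.
    users, err := expandGroup(pol, "group:example", false)
    if err != nil || len(users) != 2 {
        t.Fatalf("got %v, %v", users, err)
    }

    // Groups cannot be nested inside groups.
    pol.Groups["group:nested"] = []string{"group:example"}
    if _, err := expandGroup(pol, "group:nested", false); err == nil {
        t.Fatal("expected errInvalidGroup for nested group")
    }

    // Unknown groups are rejected as well.
    if _, err := expandGroup(pol, "group:missing", false); err == nil {
        t.Fatal("expected errInvalidGroup for unknown group")
    }
}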

1543
acls_test.go Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
package policy
package headscale
import (
"encoding/json"
@@ -34,7 +34,7 @@ type Groups map[string][]string
// Hosts are alias for IP addresses or subnets.
type Hosts map[string]netip.Prefix
// TagOwners specify what users (users?) are allowed to use certain tags.
// TagOwners specify what users (namespaces?) are allowed to use certain tags.
type TagOwners map[string][]string
// ACLTest is not implemented, but should be used to check if a certain rule is allowed.
@@ -44,7 +44,7 @@ type ACLTest struct {
Deny []string `json:"deny,omitempty" yaml:"deny,omitempty"`
}
// AutoApprovers specify which users (users?), groups or tags have their advertised routes
// AutoApprovers specify which users (namespaces?), groups or tags have their advertised routes
// or exit node status automatically enabled.
type AutoApprovers struct {
Routes map[string][]string `json:"routes" yaml:"routes"`
@@ -111,15 +111,15 @@ func (hosts *Hosts) UnmarshalYAML(data []byte) error {
}
// IsZero is perhaps a bit naive here.
func (pol ACLPolicy) IsZero() bool {
if len(pol.Groups) == 0 && len(pol.Hosts) == 0 && len(pol.ACLs) == 0 {
func (policy ACLPolicy) IsZero() bool {
if len(policy.Groups) == 0 && len(policy.Hosts) == 0 && len(policy.ACLs) == 0 {
return true
}
return false
}
// Returns the list of autoApproving users, groups or tags for a given IPPrefix.
// Returns the list of autoApproving namespaces, groups or tags for a given IPPrefix.
func (autoApprovers *AutoApprovers) GetRouteApprovers(
prefix netip.Prefix,
) ([]string, error) {

View File

@@ -1,120 +1,29 @@
package hscontrol
package headscale
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"html/template"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
const (
// The CapabilityVersion is used by Tailscale clients to indicate
// their codebase version. Tailscale clients can communicate over TS2021
// from CapabilityVersion 28, but we only have good support for it
// since https://github.com/tailscale/tailscale/pull/4323 (Noise in any HTTPS port).
//
// Related to this change, there is https://github.com/tailscale/tailscale/pull/5379,
// where CapabilityVersion 39 is introduced to indicate #4323 was merged.
//
// See also https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go
NoiseCapabilityVersion = 39
// TODO(juan): remove this once https://github.com/juanfont/headscale/issues/727 is fixed.
registrationHoldoff = time.Second * 5
reservedResponseHeaderSize = 4
registrationHoldoff = time.Second * 5
reservedResponseHeaderSize = 4
RegisterMethodAuthKey = "authkey"
RegisterMethodOIDC = "oidc"
RegisterMethodCLI = "cli"
ErrRegisterMethodCLIDoesNotSupportExpire = Error(
"machines registered with CLI does not support expire",
)
)
var ErrRegisterMethodCLIDoesNotSupportExpire = errors.New(
"machines registered with CLI does not support expire",
)
var ErrNoCapabilityVersion = errors.New("no capability version set")
func parseCabailityVersion(req *http.Request) (tailcfg.CapabilityVersion, error) {
clientCapabilityStr := req.URL.Query().Get("v")
if clientCapabilityStr == "" {
return 0, ErrNoCapabilityVersion
}
clientCapabilityVersion, err := strconv.Atoi(clientCapabilityStr)
if err != nil {
return 0, fmt.Errorf("failed to parse capability version: %w", err)
}
return tailcfg.CapabilityVersion(clientCapabilityVersion), nil
}
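For illustration, a small standalone sketch of the "v" query-parameter handling done by parseCabailityVersion above; the request URL is made up.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

func main() {
	// A Tailscale client fetching /key advertises its capability version as ?v=N.
	req := httptest.NewRequest(http.MethodGet, "/key?v=39", nil)

	v := req.URL.Query().Get("v")
	if v == "" {
		fmt.Println("no capability version set")
		return
	}
	capVer, err := strconv.Atoi(v)
	if err != nil {
		fmt.Println("failed to parse capability version:", err)
		return
	}
	// 39 or newer means the client can talk the Noise-based TS2021 protocol.
	fmt.Println(capVer, capVer >= 39) // 39 true
}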
// KeyHandler provides the Headscale pub key
// Listens in /key.
func (h *Headscale) KeyHandler(
writer http.ResponseWriter,
req *http.Request,
) {
// New Tailscale clients send a 'v' parameter to indicate the CurrentCapabilityVersion
capVer, err := parseCabailityVersion(req)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("could not get capability version")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusInternalServerError)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
log.Debug().
Str("handler", "/key").
Int("cap_ver", int(capVer)).
Msg("New noise client")
if err != nil {
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write([]byte("Wrong params"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
// TS2021 (Tailscale v2 protocol) requires a different key
if capVer >= NoiseCapabilityVersion {
resp := tailcfg.OverTLSPublicKeyResponse{
PublicKey: h.noisePrivateKey.Public(),
}
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
err = json.NewEncoder(writer).Encode(resp)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
}
func (h *Headscale) HealthHandler(
writer http.ResponseWriter,
req *http.Request,
@@ -144,7 +53,7 @@ func (h *Headscale) HealthHandler(
}
}
if err := h.db.PingDB(req.Context()); err != nil {
if err := h.pingDB(req.Context()); err != nil {
respond(err)
return
@@ -169,7 +78,7 @@ var registerWebAPITemplate = template.Must(
<p>
Run the command below in the headscale server to add this machine to your network:
</p>
<pre><code>headscale nodes register --user USERNAME --key {{.Key}}</code></pre>
<pre><code>headscale -n NAMESPACE nodes register --key {{.Key}}</code></pre>
</body>
</html>
`))
@@ -184,16 +93,33 @@ func (h *Headscale) RegisterWebAPI(
req *http.Request,
) {
vars := mux.Vars(req)
machineKeyStr := vars["mkey"]
nodeKeyStr, ok := vars["nkey"]
if !NodePublicKeyRegex.Match([]byte(nodeKeyStr)) {
log.Warn().Str("node_key", nodeKeyStr).Msg("Invalid node key passed to registration url")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusUnauthorized)
_, err := writer.Write([]byte("Unauthorized"))
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
return
}
// We need to make sure we don't open ourselves up to XSS-style injections: if the parameter that
// is passed as a key is not parsable/validated as a NodePublic key, then fail to render
// the template and log an error.
var machineKey key.MachinePublic
err := machineKey.UnmarshalText(
[]byte(machineKeyStr),
var nodeKey key.NodePublic
err := nodeKey.UnmarshalText(
[]byte(NodePublicKeyEnsurePrefix(nodeKeyStr)),
)
if err != nil {
if !ok || nodeKeyStr == "" || err != nil {
log.Warn().Err(err).Msg("Failed to parse incoming nodekey")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -211,7 +137,7 @@ func (h *Headscale) RegisterWebAPI(
var content bytes.Buffer
if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
Key: machineKey.String(),
Key: nodeKeyStr,
}); err != nil {
log.Error().
Str("func", "RegisterWebAPI").

api_common.go (new file, 81 lines)
View File

@@ -0,0 +1,81 @@
package headscale
import (
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
)
func (h *Headscale) generateMapResponse(
mapRequest tailcfg.MapRequest,
machine *Machine,
) (*tailcfg.MapResponse, error) {
log.Trace().
Str("func", "generateMapResponse").
Str("machine", mapRequest.Hostinfo.Hostname).
Msg("Creating Map response")
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig)
if err != nil {
log.Error().
Caller().
Str("func", "generateMapResponse").
Err(err).
Msg("Cannot convert to node")
return nil, err
}
peers, err := h.getValidPeers(machine)
if err != nil {
log.Error().
Caller().
Str("func", "generateMapResponse").
Err(err).
Msg("Cannot fetch peers")
return nil, err
}
profiles := h.getMapResponseUserProfiles(*machine, peers)
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig)
if err != nil {
log.Error().
Caller().
Str("func", "generateMapResponse").
Err(err).
Msg("Failed to convert peers to Tailscale nodes")
return nil, err
}
dnsConfig := getMapResponseDNSConfig(
h.cfg.DNSConfig,
h.cfg.BaseDomain,
*machine,
peers,
)
resp := tailcfg.MapResponse{
KeepAlive: false,
Node: node,
Peers: nodePeers,
DNSConfig: dnsConfig,
Domain: h.cfg.BaseDomain,
PacketFilter: h.aclRules,
SSHPolicy: h.sshPolicy,
DERPMap: h.DERPMap,
UserProfiles: profiles,
Debug: &tailcfg.Debug{
DisableLogTail: !h.cfg.LogTail.Enabled,
RandomizeClientPort: h.cfg.RandomizeClientPort,
},
}
log.Trace().
Str("func", "generateMapResponse").
Str("machine", mapRequest.Hostinfo.Hostname).
// Interface("payload", resp).
Msgf("Generated map response: %s", tailMapResponseToString(resp))
return &resp, nil
}

api_key.go (new file, 157 lines)
View File

@@ -0,0 +1,157 @@
package headscale
import (
"fmt"
"strings"
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"golang.org/x/crypto/bcrypt"
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
apiPrefixLength = 7
apiKeyLength = 32
ErrAPIKeyFailedToParse = Error("Failed to parse ApiKey")
)
// APIKey describes the datamodel for API keys used to remotely authenticate with
// headscale.
type APIKey struct {
ID uint64 `gorm:"primary_key"`
Prefix string `gorm:"uniqueIndex"`
Hash []byte
CreatedAt *time.Time
Expiration *time.Time
LastSeen *time.Time
}
// CreateAPIKey creates a new ApiKey in a namespace, and returns it.
func (h *Headscale) CreateAPIKey(
expiration *time.Time,
) (string, *APIKey, error) {
prefix, err := GenerateRandomStringURLSafe(apiPrefixLength)
if err != nil {
return "", nil, err
}
toBeHashed, err := GenerateRandomStringURLSafe(apiKeyLength)
if err != nil {
return "", nil, err
}
// Key to return to user, this will only be visible _once_
keyStr := prefix + "." + toBeHashed
hash, err := bcrypt.GenerateFromPassword([]byte(toBeHashed), bcrypt.DefaultCost)
if err != nil {
return "", nil, err
}
key := APIKey{
Prefix: prefix,
Hash: hash,
Expiration: expiration,
}
if err := h.db.Save(&key).Error; err != nil {
return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
}
return keyStr, &key, nil
}
// ListAPIKeys returns the list of ApiKeys for a namespace.
func (h *Headscale) ListAPIKeys() ([]APIKey, error) {
keys := []APIKey{}
if err := h.db.Find(&keys).Error; err != nil {
return nil, err
}
return keys, nil
}
// GetAPIKey returns an ApiKey for a given key.
func (h *Headscale) GetAPIKey(prefix string) (*APIKey, error) {
key := APIKey{}
if result := h.db.First(&key, "prefix = ?", prefix); result.Error != nil {
return nil, result.Error
}
return &key, nil
}
// GetAPIKeyByID returns an ApiKey for a given id.
func (h *Headscale) GetAPIKeyByID(id uint64) (*APIKey, error) {
key := APIKey{}
if result := h.db.Find(&APIKey{ID: id}).First(&key); result.Error != nil {
return nil, result.Error
}
return &key, nil
}
// DestroyAPIKey destroys an ApiKey. Returns an error if the ApiKey
// does not exist.
func (h *Headscale) DestroyAPIKey(key APIKey) error {
if result := h.db.Unscoped().Delete(key); result.Error != nil {
return result.Error
}
return nil
}
// ExpireAPIKey marks an ApiKey as expired.
func (h *Headscale) ExpireAPIKey(key *APIKey) error {
if err := h.db.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
return err
}
return nil
}
func (h *Headscale) ValidateAPIKey(keyStr string) (bool, error) {
prefix, hash, found := strings.Cut(keyStr, ".")
if !found {
return false, ErrAPIKeyFailedToParse
}
key, err := h.GetAPIKey(prefix)
if err != nil {
return false, fmt.Errorf("failed to validate api key: %w", err)
}
if key.Expiration.Before(time.Now()) {
return false, nil
}
if err := bcrypt.CompareHashAndPassword(key.Hash, []byte(hash)); err != nil {
return false, err
}
return true, nil
}
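For illustration, a condensed sketch of the key format that CreateAPIKey and ValidateAPIKey above work with: the key is handed out once as "<prefix>.<secret>" and only a bcrypt hash of the secret is stored. The literal key value below is made up.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// The value shown to the operator, visible only once.
	issued := "AbCdEfG.0123456789abcdef0123456789abcdef"

	prefix, secret, _ := strings.Cut(issued, ".")

	// At creation time only bcrypt(secret) is persisted, keyed by prefix.
	hash, _ := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)

	// At validation time the record is looked up by prefix and the hash compared.
	err := bcrypt.CompareHashAndPassword(hash, []byte(secret))
	fmt.Println(prefix, err == nil) // AbCdEfG true
}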
func (key *APIKey) toProto() *v1.ApiKey {
protoKey := v1.ApiKey{
Id: key.ID,
Prefix: key.Prefix,
}
if key.Expiration != nil {
protoKey.Expiration = timestamppb.New(*key.Expiration)
}
if key.CreatedAt != nil {
protoKey.CreatedAt = timestamppb.New(*key.CreatedAt)
}
if key.LastSeen != nil {
protoKey.LastSeen = timestamppb.New(*key.LastSeen)
}
return &protoKey
}

View File

@@ -1,4 +1,4 @@
package db
package headscale
import (
"time"
@@ -7,7 +7,7 @@ import (
)
func (*Suite) TestCreateAPIKey(c *check.C) {
apiKeyStr, apiKey, err := db.CreateAPIKey(nil)
apiKeyStr, apiKey, err := app.CreateAPIKey(nil)
c.Assert(err, check.IsNil)
c.Assert(apiKey, check.NotNil)
@@ -16,74 +16,74 @@ func (*Suite) TestCreateAPIKey(c *check.C) {
c.Assert(apiKey.Hash, check.NotNil)
c.Assert(apiKeyStr, check.Not(check.Equals), "")
_, err = db.ListAPIKeys()
_, err = app.ListAPIKeys()
c.Assert(err, check.IsNil)
keys, err := db.ListAPIKeys()
keys, err := app.ListAPIKeys()
c.Assert(err, check.IsNil)
c.Assert(len(keys), check.Equals, 1)
}
func (*Suite) TestAPIKeyDoesNotExist(c *check.C) {
key, err := db.GetAPIKey("does-not-exist")
key, err := app.GetAPIKey("does-not-exist")
c.Assert(err, check.NotNil)
c.Assert(key, check.IsNil)
}
func (*Suite) TestValidateAPIKeyOk(c *check.C) {
nowPlus2 := time.Now().Add(2 * time.Hour)
apiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2)
apiKeyStr, apiKey, err := app.CreateAPIKey(&nowPlus2)
c.Assert(err, check.IsNil)
c.Assert(apiKey, check.NotNil)
valid, err := db.ValidateAPIKey(apiKeyStr)
valid, err := app.ValidateAPIKey(apiKeyStr)
c.Assert(err, check.IsNil)
c.Assert(valid, check.Equals, true)
}
func (*Suite) TestValidateAPIKeyNotOk(c *check.C) {
nowMinus2 := time.Now().Add(time.Duration(-2) * time.Hour)
apiKeyStr, apiKey, err := db.CreateAPIKey(&nowMinus2)
apiKeyStr, apiKey, err := app.CreateAPIKey(&nowMinus2)
c.Assert(err, check.IsNil)
c.Assert(apiKey, check.NotNil)
valid, err := db.ValidateAPIKey(apiKeyStr)
valid, err := app.ValidateAPIKey(apiKeyStr)
c.Assert(err, check.IsNil)
c.Assert(valid, check.Equals, false)
now := time.Now()
apiKeyStrNow, apiKey, err := db.CreateAPIKey(&now)
apiKeyStrNow, apiKey, err := app.CreateAPIKey(&now)
c.Assert(err, check.IsNil)
c.Assert(apiKey, check.NotNil)
validNow, err := db.ValidateAPIKey(apiKeyStrNow)
validNow, err := app.ValidateAPIKey(apiKeyStrNow)
c.Assert(err, check.IsNil)
c.Assert(validNow, check.Equals, false)
validSilly, err := db.ValidateAPIKey("nota.validkey")
validSilly, err := app.ValidateAPIKey("nota.validkey")
c.Assert(err, check.NotNil)
c.Assert(validSilly, check.Equals, false)
validWithErr, err := db.ValidateAPIKey("produceerrorkey")
validWithErr, err := app.ValidateAPIKey("produceerrorkey")
c.Assert(err, check.NotNil)
c.Assert(validWithErr, check.Equals, false)
}
func (*Suite) TestExpireAPIKey(c *check.C) {
nowPlus2 := time.Now().Add(2 * time.Hour)
apiKeyStr, apiKey, err := db.CreateAPIKey(&nowPlus2)
apiKeyStr, apiKey, err := app.CreateAPIKey(&nowPlus2)
c.Assert(err, check.IsNil)
c.Assert(apiKey, check.NotNil)
valid, err := db.ValidateAPIKey(apiKeyStr)
valid, err := app.ValidateAPIKey(apiKeyStr)
c.Assert(err, check.IsNil)
c.Assert(valid, check.Equals, true)
err = db.ExpireAPIKey(apiKey)
err = app.ExpireAPIKey(apiKey)
c.Assert(err, check.IsNil)
c.Assert(apiKey.Expiration, check.NotNil)
notValid, err := db.ValidateAPIKey(apiKeyStr)
notValid, err := app.ValidateAPIKey(apiKeyStr)
c.Assert(err, check.IsNil)
c.Assert(notValid, check.Equals, false)
}

View File

@@ -1,4 +1,4 @@
package hscontrol
package headscale
import (
"context"
@@ -8,10 +8,10 @@ import (
"io"
"net"
"net/http"
_ "net/http/pprof" //nolint
"os"
"os/signal"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
@@ -20,20 +20,12 @@ import (
"github.com/coreos/go-oidc/v3/oidc"
"github.com/gorilla/mux"
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/juanfont/headscale"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/derp"
derpServer "github.com/juanfont/headscale/hscontrol/derp/server"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/patrickmn/go-cache"
zerolog "github.com/philip-bui/grpc-zerolog"
"github.com/pkg/profile"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/puzpuzpuz/xsync/v2"
zl "github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"golang.org/x/crypto/acme"
@@ -49,74 +41,115 @@ import (
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"gorm.io/gorm"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
)
var (
errSTUNAddressNotSet = errors.New("STUN address not set")
errUnsupportedDatabase = errors.New("unsupported DB")
errUnsupportedLetsEncryptChallengeType = errors.New(
const (
errSTUNAddressNotSet = Error("STUN address not set")
errUnsupportedDatabase = Error("unsupported DB")
errUnsupportedLetsEncryptChallengeType = Error(
"unknown value for Lets Encrypt challenge type",
)
errEmptyInitialDERPMap = errors.New(
"initial DERPMap is empty, Headscale requires at least one entry",
)
)
const (
AuthPrefix = "Bearer "
updateInterval = 5000
privateKeyFileMode = 0o600
AuthPrefix = "Bearer "
Postgres = "postgres"
Sqlite = "sqlite3"
updateInterval = 5000
HTTPReadTimeout = 30 * time.Second
HTTPShutdownTimeout = 3 * time.Second
privateKeyFileMode = 0o600
registerCacheExpiration = time.Minute * 15
registerCacheCleanup = time.Minute * 20
DisabledClientAuth = "disabled"
RelaxedClientAuth = "relaxed"
EnforcedClientAuth = "enforced"
)
// Headscale represents the base app of the service.
type Headscale struct {
cfg *types.Config
db *db.HSDatabase
cfg *Config
db *gorm.DB
dbString string
dbType string
dbDebug bool
privateKey *key.MachinePrivate
noisePrivateKey *key.MachinePrivate
noiseMux *mux.Router
DERPMap *tailcfg.DERPMap
DERPServer *derpServer.DERPServer
DERPServer *DERPServer
ACLPolicy *policy.ACLPolicy
aclPolicy *ACLPolicy
aclRules []tailcfg.FilterRule
sshPolicy *tailcfg.SSHPolicy
nodeNotifier *notifier.Notifier
lastStateChange *xsync.MapOf[string, time.Time]
oidcProvider *oidc.Provider
oauth2Config *oauth2.Config
registrationCache *cache.Cache
ipAllocationMutex sync.Mutex
shutdownChan chan struct{}
pollNetMapStreamWG sync.WaitGroup
}
var (
profilingEnabled = envknob.Bool("HEADSCALE_PROFILING_ENABLED")
tailsqlEnabled = envknob.Bool("HEADSCALE_DEBUG_TAILSQL_ENABLED")
tailsqlStateDir = envknob.String("HEADSCALE_DEBUG_TAILSQL_STATE_DIR")
tailsqlTSKey = envknob.String("TS_AUTHKEY")
)
func NewHeadscale(cfg *types.Config) (*Headscale, error) {
if profilingEnabled {
runtime.SetBlockProfileRate(1)
func NewHeadscale(cfg *Config) (*Headscale, error) {
privateKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create private key: %w", err)
}
// TS2021 requires a different key from the legacy protocol.
noisePrivateKey, err := readOrCreatePrivateKey(cfg.NoisePrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
}
if privateKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf("private key and noise private key are the same: %w", err)
}
var dbString string
switch cfg.DBtype {
case Postgres:
dbString = fmt.Sprintf(
"host=%s dbname=%s user=%s",
cfg.DBhost,
cfg.DBname,
cfg.DBuser,
)
if sslEnabled, err := strconv.ParseBool(cfg.DBssl); err == nil {
if !sslEnabled {
dbString += " sslmode=disable"
}
} else {
dbString += fmt.Sprintf(" sslmode=%s", cfg.DBssl)
}
if cfg.DBport != 0 {
dbString += fmt.Sprintf(" port=%d", cfg.DBport)
}
if cfg.DBpass != "" {
dbString += fmt.Sprintf(" password=%s", cfg.DBpass)
}
case Sqlite:
dbString = cfg.DBpath
default:
return nil, errUnsupportedDatabase
}
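For illustration, the Postgres branch above builds a keyword/value DSN; with a hypothetical config (host, database name, user, port and password are made up, and ssl set to "false") the result looks like this:
package main

import "fmt"

func main() {
	// Mirrors the string building in the Postgres case of NewHeadscale.
	dbString := fmt.Sprintf("host=%s dbname=%s user=%s", "localhost", "headscale", "headscale")
	dbString += " sslmode=disable" // cfg.DBssl parsed as a bool and found false
	dbString += fmt.Sprintf(" port=%d", 5432)
	dbString += fmt.Sprintf(" password=%s", "hunter2")

	fmt.Println(dbString)
	// host=localhost dbname=headscale user=headscale sslmode=disable port=5432 password=hunter2
}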
registrationCache := cache.New(
registerCacheExpiration,
registerCacheCleanup,
@@ -124,23 +157,20 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
app := Headscale{
cfg: cfg,
dbType: cfg.DBtype,
dbString: dbString,
privateKey: privateKey,
noisePrivateKey: noisePrivateKey,
aclRules: tailcfg.FilterAllowAll, // default allowall
registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{},
nodeNotifier: notifier.NewNotifier(),
}
database, err := db.NewHeadscaleDatabase(
cfg.Database,
app.nodeNotifier,
cfg.IPPrefixes,
cfg.BaseDomain)
err = app.initDB()
if err != nil {
return nil, err
}
app.db = database
if cfg.OIDC.Issuer != "" {
err = app.initOIDC()
if err != nil {
@@ -153,7 +183,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
}
if app.cfg.DNSConfig != nil && app.cfg.DNSConfig.Proxied { // if MagicDNS
magicDNSDomains := util.GenerateMagicDNSRootDomains(app.cfg.IPPrefixes)
magicDNSDomains := generateMagicDNSRootDomains(app.cfg.IPPrefixes)
// we might have routes already from Split DNS
if app.cfg.DNSConfig.Routes == nil {
app.cfg.DNSConfig.Routes = make(map[string][]*dnstype.Resolver)
@@ -164,23 +194,7 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
}
if cfg.DERP.ServerEnabled {
derpServerKey, err := readOrCreatePrivateKey(cfg.DERP.ServerPrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("failed to read or create DERP server private key: %w", err)
}
if derpServerKey.Equal(*noisePrivateKey) {
return nil, fmt.Errorf(
"DERP server private key and noise private key are the same: %w",
err,
)
}
embeddedDERPServer, err := derpServer.NewDERPServer(
cfg.ServerURL,
key.NodePrivate(*derpServerKey),
&cfg.DERP,
)
embeddedDERPServer, err := app.NewDERPServer()
if err != nil {
return nil, err
}
@@ -196,88 +210,58 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, target, http.StatusFound)
}
// expireEphemeralNodes deletes ephemeral node records that have not been
// expireEphemeralNodes deletes ephemeral machine records that have not been
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout.
func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
var update types.StateUpdate
var changed bool
for range ticker.C {
if err := h.db.DB.Transaction(func(tx *gorm.DB) error {
update, changed = db.ExpireEphemeralNodes(tx, h.cfg.EphemeralNodeInactivityTimeout)
return nil
}); err != nil {
log.Error().Err(err).Msg("database error while expiring ephemeral nodes")
continue
}
if changed && update.Valid() {
ctx := types.NotifyCtx(context.Background(), "expire-ephemeral", "na")
h.nodeNotifier.NotifyAll(ctx, update)
}
h.expireEphemeralNodesWorker()
}
}
// expireExpiredMachines expires nodes that have an explicit expiry set
// after that expiry time has passed.
func (h *Headscale) expireExpiredMachines(intervalMs int64) {
interval := time.Duration(intervalMs) * time.Millisecond
ticker := time.NewTicker(interval)
func (h *Headscale) expireEphemeralNodesWorker() {
namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().Err(err).Msg("Error listing namespaces")
lastCheck := time.Unix(0, 0)
var update types.StateUpdate
var changed bool
for range ticker.C {
if err := h.db.DB.Transaction(func(tx *gorm.DB) error {
lastCheck, update, changed = db.ExpireExpiredNodes(tx, lastCheck)
return nil
}); err != nil {
log.Error().Err(err).Msg("database error while expiring nodes")
continue
}
log.Trace().Str("nodes", update.ChangeNodes.String()).Msgf("expiring nodes")
if changed && update.Valid() {
ctx := types.NotifyCtx(context.Background(), "expire-expired", "na")
h.nodeNotifier.NotifyAll(ctx, update)
}
return
}
}
// scheduledDERPMapUpdateWorker refreshes the DERPMap stored on the global object
// at a set interval.
func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
log.Info().
Dur("frequency", h.cfg.DERP.UpdateFrequency).
Msg("Setting up a DERPMap update worker")
ticker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
for _, namespace := range namespaces {
machines, err := h.ListMachinesInNamespace(namespace.Name)
if err != nil {
log.Error().
Err(err).
Str("namespace", namespace.Name).
Msg("Error listing machines in namespace")
for {
select {
case <-cancelChan:
return
}
case <-ticker.C:
log.Info().Msg("Fetching DERPMap updates")
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
region, _ := h.DERPServer.GenerateRegion()
h.DERPMap.Regions[region.RegionID] = &region
}
expiredFound := false
for _, machine := range machines {
if machine.AuthKey != nil && machine.LastSeen != nil &&
machine.AuthKey.Ephemeral &&
time.Now().
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
expiredFound = true
log.Info().
Str("machine", machine.Hostname).
Msg("Ephemeral client removed from database")
stateUpdate := types.StateUpdate{
Type: types.StateDERPUpdated,
DERPMap: h.DERPMap,
}
if stateUpdate.Valid() {
ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na")
h.nodeNotifier.NotifyAll(ctx, stateUpdate)
err = h.db.Unscoped().Delete(machine).Error
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Msg("🤮 Cannot delete ephemeral machine from the database")
}
}
}
if expiredFound {
h.setLastStateChangeToNow()
}
}
}
@@ -338,7 +322,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
)
}
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
valid, err := h.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
if err != nil {
log.Error().
Caller().
@@ -389,7 +373,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
return
}
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
valid, err := h.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
if err != nil {
log.Error().
Caller().
@@ -441,17 +425,17 @@ func (h *Headscale) ensureUnixSocketIsAbsent() error {
return os.Remove(h.cfg.UnixSocket)
}
func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *mux.Router {
router := mux.NewRouter()
router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux)
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost)
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{mkey}", h.RegisterWebAPI).Methods(http.MethodGet)
router.HandleFunc("/register/{nkey}", h.RegisterWebAPI).Methods(http.MethodGet)
h.addLegacyHandlers(router)
router.HandleFunc("/oidc/register/{mkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/register/{nkey}", h.RegisterOIDC).Methods(http.MethodGet)
router.HandleFunc("/oidc/callback", h.OIDCCallback).Methods(http.MethodGet)
router.HandleFunc("/apple", h.AppleConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/apple/{platform}", h.ApplePlatformConfig).
@@ -459,46 +443,41 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
router.HandleFunc("/windows", h.WindowsConfigMessage).Methods(http.MethodGet)
router.HandleFunc("/windows/tailscale.reg", h.WindowsRegConfig).
Methods(http.MethodGet)
// TODO(kristoffer): move swagger into a package
router.HandleFunc("/swagger", headscale.SwaggerUI).Methods(http.MethodGet)
router.HandleFunc("/swagger/v1/openapiv2.json", headscale.SwaggerAPIv1).
router.HandleFunc("/swagger", SwaggerUI).Methods(http.MethodGet)
router.HandleFunc("/swagger/v1/openapiv2.json", SwaggerAPIv1).
Methods(http.MethodGet)
if h.cfg.DERP.ServerEnabled {
router.HandleFunc("/derp", h.DERPServer.DERPHandler)
router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler)
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.DERPMap))
router.HandleFunc("/derp", h.DERPHandler)
router.HandleFunc("/derp/probe", h.DERPProbeHandler)
router.HandleFunc("/bootstrap-dns", h.DERPBootstrapDNSHandler)
}
apiRouter := router.PathPrefix("/api").Subrouter()
apiRouter.Use(h.httpAuthenticationMiddleware)
apiRouter.PathPrefix("/v1/").HandlerFunc(grpcMux.ServeHTTP)
router.PathPrefix("/").HandlerFunc(notFoundHandler)
router.PathPrefix("/").HandlerFunc(stdoutHandler)
return router
}
func (h *Headscale) createNoiseMux() *mux.Router {
router := mux.NewRouter()
router.HandleFunc("/machine/register", h.NoiseRegistrationHandler).
Methods(http.MethodPost)
router.HandleFunc("/machine/map", h.NoisePollNetMapHandler)
return router
}
// Serve launches a GIN server with the Headscale API.
func (h *Headscale) Serve() error {
if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile {
if profilePath, ok := os.LookupEnv("HEADSCALE_PROFILING_PATH"); ok {
err := os.MkdirAll(profilePath, os.ModePerm)
if err != nil {
log.Fatal().Err(err).Msg("failed to create profiling directory")
}
defer profile.Start(profile.ProfilePath(profilePath)).Stop()
} else {
defer profile.Start().Stop()
}
}
var err error
// Fetch an initial DERP Map before we start serving
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
h.DERPMap = GetDERPMap(h.cfg.DERP)
if h.cfg.DERP.ServerEnabled {
// When embedded DERP is enabled we always need a STUN server
@@ -506,16 +485,8 @@ func (h *Headscale) Serve() error {
return errSTUNAddressNotSet
}
region, err := h.DERPServer.GenerateRegion()
if err != nil {
return err
}
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
h.DERPMap.Regions[region.RegionID] = &region
}
go h.DERPServer.ServeSTUN()
h.DERPMap.Regions[h.DERPServer.region.RegionID] = &h.DERPServer.region
go h.ServeSTUN()
}
if h.cfg.DERP.AutoUpdate {
@@ -524,14 +495,7 @@ func (h *Headscale) Serve() error {
go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel)
}
if len(h.DERPMap.Regions) == 0 {
return errEmptyInitialDERPMap
}
// TODO(kradalby): These should have cancel channels and be cleaned
// up on shutdown.
go h.expireEphemeralNodes(updateInterval)
go h.expireExpiredMachines(updateInterval)
if zl.GlobalLevel() == zl.TraceLevel {
zerolog.RespLog = true
@@ -566,14 +530,14 @@ func (h *Headscale) Serve() error {
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
}
grpcGatewayMux := grpcRuntime.NewServeMux()
grpcGatewayMux := runtime.NewServeMux()
// Make the grpc-gateway connect to grpc over socket
grpcGatewayConn, err := grpc.Dial(
h.cfg.UnixSocket,
[]grpc.DialOption{
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(util.GrpcSocketDialer),
grpc.WithContextDialer(GrpcSocketDialer),
}...,
)
if err != nil {
@@ -588,10 +552,7 @@ func (h *Headscale) Serve() error {
}
// Start the local gRPC server without TLS and without authentication
grpcSocket := grpc.NewServer(
// Uncomment to debug grpc communication.
// zerolog.UnaryInterceptor(),
)
grpcSocket := grpc.NewServer(zerolog.UnaryInterceptor())
v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))
reflection.Register(grpcSocket)
@@ -631,8 +592,7 @@ func (h *Headscale) Serve() error {
grpc.UnaryInterceptor(
grpcMiddleware.ChainUnaryServer(
h.grpcAuthenticationInterceptor,
// Uncomment to debug grpc communication.
// zerolog.NewUnaryServerInterceptor(),
zerolog.NewUnaryServerInterceptor(),
),
),
}
@@ -669,10 +629,16 @@ func (h *Headscale) Serve() error {
// over our main Addr. It also serves the legacy Tailscale API
router := h.createRouter(grpcGatewayMux)
// This router is served only over the Noise connection, and exposes only the new API.
//
// The HTTP2 server that exposes this router is created for
// a single hijacked connection from /ts2021, using netutil.NewOneConnListener
h.noiseMux = h.createNoiseMux()
httpServer := &http.Server{
Addr: h.cfg.Addr,
Handler: router,
ReadTimeout: types.HTTPReadTimeout,
ReadTimeout: HTTPReadTimeout,
// Go does not handle timeouts in HTTP very well, and there is
// no good way to handle streaming timeouts, therefore we need to
// keep this at unlimited and be careful to clean up connections
@@ -702,7 +668,7 @@ func (h *Headscale) Serve() error {
promHTTPServer := &http.Server{
Addr: h.cfg.MetricsAddr,
Handler: promMux,
ReadTimeout: types.HTTPReadTimeout,
ReadTimeout: HTTPReadTimeout,
WriteTimeout: 0,
}
@@ -718,20 +684,6 @@ func (h *Headscale) Serve() error {
log.Info().
Msgf("listening and serving metrics on: %s", h.cfg.MetricsAddr)
var tailsqlContext context.Context
if tailsqlEnabled {
if h.cfg.Database.Type != types.DatabaseSqlite {
log.Fatal().
Str("type", h.cfg.Database.Type).
Msgf("tailsql only support %q", types.DatabaseSqlite)
}
if tailsqlTSKey == "" {
log.Fatal().Msg("tailsql requires TS_AUTHKEY to be set")
}
tailsqlContext = context.Background()
go runTailSQLService(ctx, util.TSLogfWrapper(), tailsqlStateDir, h.cfg.Database.Sqlite.Path)
}
// Handle common process-killing signals so we can gracefully shut down:
h.shutdownChan = make(chan struct{})
sigc := make(chan os.Signal, 1)
@@ -754,21 +706,16 @@ func (h *Headscale) Serve() error {
// TODO(kradalby): Reload config on SIGHUP
if h.cfg.ACL.PolicyPath != "" {
aclPath := util.AbsolutePathFromConfigPath(h.cfg.ACL.PolicyPath)
pol, err := policy.LoadACLPolicyFromPath(aclPath)
aclPath := AbsolutePathFromConfigPath(h.cfg.ACL.PolicyPath)
err := h.LoadACLPolicy(aclPath)
if err != nil {
log.Error().Err(err).Msg("Failed to reload ACL policy")
}
h.ACLPolicy = pol
log.Info().
Str("path", aclPath).
Msg("ACL policy successfully reloaded, notifying nodes of change")
ctx := types.NotifyCtx(context.Background(), "acl-sighup", "na")
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StateFullUpdate,
})
h.setLastStateChangeToNow()
}
default:
@@ -777,13 +724,12 @@ func (h *Headscale) Serve() error {
Msg("Received signal to stop, shutting down gracefully")
close(h.shutdownChan)
h.pollNetMapStreamWG.Wait()
// Gracefully shut down servers
ctx, cancel := context.WithTimeout(
context.Background(),
types.HTTPShutdownTimeout,
HTTPShutdownTimeout,
)
if err := promHTTPServer.Shutdown(ctx); err != nil {
log.Error().Err(err).Msg("Failed to shutdown prometheus http")
@@ -798,10 +744,6 @@ func (h *Headscale) Serve() error {
grpcListener.Close()
}
if tailsqlContext != nil {
tailsqlContext.Done()
}
// Close network listeners
promHTTPListener.Close()
httpListener.Close()
@@ -811,7 +753,11 @@ func (h *Headscale) Serve() error {
socketListener.Close()
// Close db connections
err = h.db.Close()
db, err := h.db.DB()
if err != nil {
log.Error().Err(err).Msg("Failed to get db handle")
}
err = db.Close()
if err != nil {
log.Error().Err(err).Msg("Failed to close db")
}
@@ -821,8 +767,7 @@ func (h *Headscale) Serve() error {
// And we're done:
cancel()
return
os.Exit(0)
}
}
}
@@ -854,13 +799,13 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
}
switch h.cfg.TLS.LetsEncrypt.ChallengeType {
case types.TLSALPN01ChallengeType:
case tlsALPN01ChallengeType:
// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
// The RFC requires that the validation is done on port 443; in other words, headscale
// must be reachable on port 443.
return certManager.TLSConfig(), nil
case types.HTTP01ChallengeType:
case http01ChallengeType:
// Configuration via autocert with HTTP-01. This requires listening on
// port 80 for the certificate validation in addition to the headscale
// service, which can be configured to run on any other port.
@@ -868,7 +813,7 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
server := &http.Server{
Addr: h.cfg.TLS.LetsEncrypt.Listen,
Handler: certManager.HTTPHandler(http.HandlerFunc(h.redirect)),
ReadTimeout: types.HTTPReadTimeout,
ReadTimeout: HTTPReadTimeout,
}
go func() {
@@ -907,7 +852,61 @@ func (h *Headscale) getTLSSettings() (*tls.Config, error) {
}
}
func notFoundHandler(
func (h *Headscale) setLastStateChangeToNow() {
var err error
now := time.Now().UTC()
namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().
Caller().
Err(err).
Msg("failed to fetch all namespaces, failing to update last changed state.")
}
for _, namespace := range namespaces {
lastStateUpdate.WithLabelValues(namespace.Name, "headscale").Set(float64(now.Unix()))
if h.lastStateChange == nil {
h.lastStateChange = xsync.NewMapOf[time.Time]()
}
h.lastStateChange.Store(namespace.Name, now)
}
}
func (h *Headscale) getLastStateChange(namespaces ...Namespace) time.Time {
times := []time.Time{}
// getLastStateChange takes a list of namespaces as a "filter"; if no namespaces
// are passed, then use the entire list of namespaces and look for the last update.
if len(namespaces) > 0 {
for _, namespace := range namespaces {
if lastChange, ok := h.lastStateChange.Load(namespace.Name); ok {
times = append(times, lastChange)
}
}
} else {
h.lastStateChange.Range(func(key string, value time.Time) bool {
times = append(times, value)
return true
})
}
sort.Slice(times, func(i, j int) bool {
return times[i].After(times[j])
})
log.Trace().Msgf("Latest times %#v", times)
if len(times) == 0 {
return time.Now().UTC()
} else {
return times[0]
}
}
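For illustration, the "most recent change wins" selection performed by getLastStateChange above, run on a few made-up timestamps:
package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	times := []time.Time{
		time.Date(2022, 11, 30, 10, 0, 0, 0, time.UTC),
		time.Date(2022, 12, 5, 19, 12, 0, 0, time.UTC),
		time.Date(2022, 12, 1, 8, 30, 0, 0, time.UTC),
	}

	// Sort newest first and take the head, as getLastStateChange does.
	sort.Slice(times, func(i, j int) bool { return times[i].After(times[j]) })
	fmt.Println(times[0]) // 2022-12-05 19:12:00 +0000 UTC
}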
func stdoutHandler(
writer http.ResponseWriter,
req *http.Request,
) {
@@ -919,7 +918,6 @@ func notFoundHandler(
Interface("url", req.URL).
Bytes("body", body).
Msg("Request did not match")
writer.WriteHeader(http.StatusNotFound)
}
func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
@@ -939,8 +937,7 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
if err != nil {
return nil, fmt.Errorf(
"failed to save private key to disk at path %q: %w",
path,
"failed to save private key to disk: %w",
err,
)
}
@@ -951,9 +948,16 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
}
trimmedPrivateKey := strings.TrimSpace(string(privateKey))
privateKeyEnsurePrefix := PrivateKeyEnsurePrefix(trimmedPrivateKey)
var machineKey key.MachinePrivate
if err = machineKey.UnmarshalText([]byte(trimmedPrivateKey)); err != nil {
if err = machineKey.UnmarshalText([]byte(privateKeyEnsurePrefix)); err != nil {
log.Info().
Str("path", path).
Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
"If the key is in WireGuard format, delete the key and restart headscale. " +
"A new key will automatically be generated. All Tailscale clients will have to be restarted")
return nil, fmt.Errorf("failed to parse private key: %w", err)
}

View File

@@ -1,11 +1,10 @@
package hscontrol
package headscale
import (
"net/netip"
"os"
"testing"
"github.com/juanfont/headscale/hscontrol/types"
"gopkg.in/check.v1"
)
@@ -19,7 +18,7 @@ type Suite struct{}
var (
tmpDir string
app *Headscale
app Headscale
)
func (s *Suite) SetUpTest(c *check.C) {
@@ -35,28 +34,28 @@ func (s *Suite) ResetDB(c *check.C) {
os.RemoveAll(tmpDir)
}
var err error
tmpDir, err = os.MkdirTemp("", "autoygg-client-test2")
tmpDir, err = os.MkdirTemp("", "autoygg-client-test")
if err != nil {
c.Fatal(err)
}
cfg := types.Config{
NoisePrivateKeyPath: tmpDir + "/noise_private.key",
Database: types.DatabaseConfig{
Type: "sqlite3",
Sqlite: types.SqliteConfig{
Path: tmpDir + "/headscale_test.db",
},
},
cfg := Config{
IPPrefixes: []netip.Prefix{
netip.MustParsePrefix("10.27.0.0/23"),
},
OIDC: types.OIDCConfig{
StripEmaildomain: false,
},
}
app, err = NewHeadscale(&cfg)
app = Headscale{
cfg: &cfg,
dbType: "sqlite3",
dbString: tmpDir + "/headscale_test.db",
}
err = app.initDB()
if err != nil {
c.Fatal(err)
}
db, err := app.openDB()
if err != nil {
c.Fatal(err)
}
app.db = db
}

View File

@@ -1,173 +0,0 @@
package main
//go:generate go run ./main.go
import (
"bytes"
"fmt"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"text/template"
)
var (
githubWorkflowPath = "../../.github/workflows/"
jobFileNameTemplate = `test-integration-v2-%s.yaml`
jobTemplate = template.Must(
template.New("jobTemplate").
Parse(`# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - {{.Name}}
on: [pull_request]
concurrency:
group: {{ "${{ github.workflow }}-$${{ github.head_ref || github.run_id }}" }}
cancel-in-progress: true
jobs:
{{.Name}}:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run {{.Name}}
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^{{.Name}}$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"
`),
)
)
const workflowFilePerm = 0o600
func removeTests() {
glob := fmt.Sprintf(jobFileNameTemplate, "*")
files, err := filepath.Glob(filepath.Join(githubWorkflowPath, glob))
if err != nil {
log.Fatalf("failed to find test files")
}
for _, file := range files {
err := os.Remove(file)
if err != nil {
log.Printf("failed to remove: %s", err)
}
}
}
func findTests() []string {
rgBin, err := exec.LookPath("rg")
if err != nil {
log.Fatalf("failed to find rg (ripgrep) binary")
}
args := []string{
"--regexp", "func (Test.+)\\(.*",
"../../integration/",
"--replace", "$1",
"--sort", "path",
"--no-line-number",
"--no-filename",
"--no-heading",
}
log.Printf("executing: %s %s", rgBin, strings.Join(args, " "))
ripgrep := exec.Command(
rgBin,
args...,
)
result, err := ripgrep.CombinedOutput()
if err != nil {
log.Printf("out: %s", result)
log.Fatalf("failed to run ripgrep: %s", err)
}
tests := strings.Split(string(result), "\n")
tests = tests[:len(tests)-1]
return tests
}
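For illustration, the same pattern findTests hands to ripgrep, applied here with Go's regexp package to a couple of made-up test declarations:
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`func (Test.+)\(.*`)
	lines := []string{
		"func TestPingAllByIP(t *testing.T) {",
		"func helperFunc() {",
		"func TestTaildrop(t *testing.T) {",
	}
	for _, line := range lines {
		// The capture group keeps only the test name, mirroring --replace "$1".
		if m := re.FindStringSubmatch(line); m != nil {
			fmt.Println(m[1])
		}
	}
	// Output:
	// TestPingAllByIP
	// TestTaildrop
}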
func main() {
type testConfig struct {
Name string
}
tests := findTests()
removeTests()
for _, test := range tests {
log.Printf("generating workflow for %s", test)
var content bytes.Buffer
if err := jobTemplate.Execute(&content, testConfig{
Name: test,
}); err != nil {
log.Fatalf("failed to render template: %s", err)
}
testPath := path.Join(githubWorkflowPath, fmt.Sprintf(jobFileNameTemplate, test))
err := os.WriteFile(testPath, content.Bytes(), workflowFilePerm)
if err != nil {
log.Fatalf("failed to write github job: %s", err)
}
}
}

View File

@@ -5,14 +5,13 @@ import (
"strconv"
"time"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/prometheus/common/model"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/protobuf/types/known/timestamppb"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
)
const (
@@ -30,16 +29,11 @@ func init() {
apiKeysCmd.AddCommand(createAPIKeyCmd)
expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
if err := expireAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
err := expireAPIKeyCmd.MarkFlagRequired("prefix")
if err != nil {
log.Fatal().Err(err).Msg("")
}
apiKeysCmd.AddCommand(expireAPIKeyCmd)
deleteAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
if err := deleteAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
log.Fatal().Err(err).Msg("")
}
apiKeysCmd.AddCommand(deleteAPIKeyCmd)
}
var apiKeysCmd = &cobra.Command{
@@ -73,7 +67,7 @@ var listAPIKeys = &cobra.Command{
}
if output != "" {
SuccessOutput(response.GetApiKeys(), "", output)
SuccessOutput(response.ApiKeys, "", output)
return
}
@@ -81,15 +75,15 @@ var listAPIKeys = &cobra.Command{
tableData := pterm.TableData{
{"ID", "Prefix", "Expiration", "Created"},
}
for _, key := range response.GetApiKeys() {
for _, key := range response.ApiKeys {
expiration := "-"
if key.GetExpiration() != nil {
expiration = ColourTime(key.GetExpiration().AsTime())
expiration = ColourTime(key.Expiration.AsTime())
}
tableData = append(tableData, []string{
strconv.FormatUint(key.GetId(), util.Base10),
strconv.FormatUint(key.GetId(), headscale.Base10),
key.GetPrefix(),
expiration,
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
@@ -161,7 +155,7 @@ If you lose a key, create a new one and revoke (expire) the old one.`,
return
}
SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
SuccessOutput(response.ApiKey, response.ApiKey, output)
},
}
@@ -205,44 +199,3 @@ var expireAPIKeyCmd = &cobra.Command{
SuccessOutput(response, "Key expired", output)
},
}
var deleteAPIKeyCmd = &cobra.Command{
Use: "delete",
Short: "Delete an ApiKey",
Aliases: []string{"remove", "del"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
prefix, err := cmd.Flags().GetString("prefix")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
output,
)
return
}
ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()
request := &v1.DeleteApiKeyRequest{
Prefix: prefix,
}
response, err := client.DeleteApiKey(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete Api Key: %s\n", err),
output,
)
return
}
SuccessOutput(response, "Key deleted", output)
},
}

View File

@@ -1,22 +0,0 @@
package cli
import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(configTestCmd)
}
var configTestCmd = &cobra.Command{
Use: "configtest",
Short: "Test the configuration.",
Long: "Run a test of the configuration and exit.",
Run: func(cmd *cobra.Command, args []string) {
_, err := getHeadscaleApp()
if err != nil {
log.Fatal().Caller().Err(err).Msg("Error initializing")
}
},
}

View File

@@ -3,11 +3,11 @@ package cli
import (
"fmt"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"tailscale.com/types/key"
)
const (
@@ -27,14 +27,8 @@ func init() {
if err != nil {
log.Fatal().Err(err).Msg("")
}
createNodeCmd.Flags().StringP("user", "u", "", "User")
createNodeCmd.Flags().StringP("namespace", "n", "", "User")
createNodeNamespaceFlag := createNodeCmd.Flags().Lookup("namespace")
createNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
createNodeNamespaceFlag.Hidden = true
err = createNodeCmd.MarkFlagRequired("user")
createNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
err = createNodeCmd.MarkFlagRequired("namespace")
if err != nil {
log.Fatal().Err(err).Msg("")
}
@@ -57,13 +51,13 @@ var debugCmd = &cobra.Command{
var createNodeCmd = &cobra.Command{
Use: "create-node",
Short: "Create a node that can be registered with `nodes register <>` command",
Short: "Create a node (machine) that can be registered with `nodes register <>` command",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
return
}
@@ -93,13 +87,11 @@ var createNodeCmd = &cobra.Command{
return
}
var mkey key.MachinePublic
err = mkey.UnmarshalText([]byte(machineKey))
if err != nil {
if !headscale.NodePublicKeyRegex.Match([]byte(machineKey)) {
err = errPreAuthKeyMalformed
ErrorOutput(
err,
fmt.Sprintf("Failed to parse machine key from flag: %s", err),
fmt.Sprintf("Error: %s", err),
output,
)
@@ -117,24 +109,24 @@ var createNodeCmd = &cobra.Command{
return
}
request := &v1.DebugCreateNodeRequest{
Key: machineKey,
Name: name,
User: user,
Routes: routes,
request := &v1.DebugCreateMachineRequest{
Key: machineKey,
Name: name,
Namespace: namespace,
Routes: routes,
}
response, err := client.DebugCreateNode(ctx, request)
response, err := client.DebugCreateMachine(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot create node: %s", status.Convert(err).Message()),
fmt.Sprintf("Cannot create machine: %s", status.Convert(err).Message()),
output,
)
return
}
SuccessOutput(response.GetNode(), "Node created", output)
SuccessOutput(response.Machine, "Machine created", output)
},
}

View File

@@ -16,11 +16,10 @@ const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
accessTTL = 10 * time.Minute
refreshTTL = 60 * time.Minute
)
var accessTTL = 2 * time.Minute
func init() {
rootCmd.AddCommand(mockOidcCmd)
}
@@ -55,16 +54,6 @@ func mockOIDC() error {
if portStr == "" {
return errMockOidcPortNotDefined
}
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
if accessTTLOverride != "" {
newTTL, err := time.ParseDuration(accessTTLOverride)
if err != nil {
return err
}
accessTTL = newTTL
}
log.Info().Msgf("Access token TTL: %s", accessTTL)
port, err := strconv.Atoi(portStr)
if err != nil {

View File

@@ -1,10 +1,10 @@
package cli
import (
"errors"
"fmt"
survey "github.com/AlecAivazis/survey/v2"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/pterm/pterm"
"github.com/rs/zerolog/log"
@@ -13,24 +13,26 @@ import (
)
func init() {
rootCmd.AddCommand(userCmd)
userCmd.AddCommand(createUserCmd)
userCmd.AddCommand(listUsersCmd)
userCmd.AddCommand(destroyUserCmd)
userCmd.AddCommand(renameUserCmd)
rootCmd.AddCommand(namespaceCmd)
namespaceCmd.AddCommand(createNamespaceCmd)
namespaceCmd.AddCommand(listNamespacesCmd)
namespaceCmd.AddCommand(destroyNamespaceCmd)
namespaceCmd.AddCommand(renameNamespaceCmd)
}
var errMissingParameter = errors.New("missing parameters")
const (
errMissingParameter = headscale.Error("missing parameters")
)
var userCmd = &cobra.Command{
Use: "users",
Short: "Manage the users of Headscale",
Aliases: []string{"user", "namespace", "namespaces", "ns"},
var namespaceCmd = &cobra.Command{
Use: "namespaces",
Short: "Manage the namespaces of Headscale",
Aliases: []string{"namespace", "ns", "user", "users"},
}
var createUserCmd = &cobra.Command{
var createNamespaceCmd = &cobra.Command{
Use: "create NAME",
Short: "Creates a new user",
Short: "Creates a new namespace",
Aliases: []string{"c", "new"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
@@ -42,7 +44,7 @@ var createUserCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
userName := args[0]
namespaceName := args[0]
ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
@@ -50,15 +52,15 @@ var createUserCmd = &cobra.Command{
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
request := &v1.CreateUserRequest{Name: userName}
request := &v1.CreateNamespaceRequest{Name: namespaceName}
log.Trace().Interface("request", request).Msg("Sending CreateUser request")
response, err := client.CreateUser(ctx, request)
log.Trace().Interface("request", request).Msg("Sending CreateNamespace request")
response, err := client.CreateNamespace(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot create user: %s",
"Cannot create namespace: %s",
status.Convert(err).Message(),
),
output,
@@ -67,13 +69,13 @@ var createUserCmd = &cobra.Command{
return
}
SuccessOutput(response.GetUser(), "User created", output)
SuccessOutput(response.Namespace, "Namespace created", output)
},
}
var destroyUserCmd = &cobra.Command{
var destroyNamespaceCmd = &cobra.Command{
Use: "destroy NAME",
Short: "Destroys a user",
Short: "Destroys a namespace",
Aliases: []string{"delete"},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
@@ -85,17 +87,17 @@ var destroyUserCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
userName := args[0]
namespaceName := args[0]
request := &v1.GetUserRequest{
Name: userName,
request := &v1.GetNamespaceRequest{
Name: namespaceName,
}
ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()
_, err := client.GetUser(ctx, request)
_, err := client.GetNamespace(ctx, request)
if err != nil {
ErrorOutput(
err,
@@ -111,8 +113,8 @@ var destroyUserCmd = &cobra.Command{
if !force {
prompt := &survey.Confirm{
Message: fmt.Sprintf(
"Do you want to remove the user '%s' and any associated preauthkeys?",
userName,
"Do you want to remove the namespace '%s' and any associated preauthkeys?",
namespaceName,
),
}
err := survey.AskOne(prompt, &confirm)
@@ -122,14 +124,14 @@ var destroyUserCmd = &cobra.Command{
}
if confirm || force {
request := &v1.DeleteUserRequest{Name: userName}
request := &v1.DeleteNamespaceRequest{Name: namespaceName}
response, err := client.DeleteUser(ctx, request)
response, err := client.DeleteNamespace(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot destroy user: %s",
"Cannot destroy namespace: %s",
status.Convert(err).Message(),
),
output,
@@ -137,16 +139,16 @@ var destroyUserCmd = &cobra.Command{
return
}
SuccessOutput(response, "User destroyed", output)
SuccessOutput(response, "Namespace destroyed", output)
} else {
SuccessOutput(map[string]string{"Result": "User not destroyed"}, "User not destroyed", output)
SuccessOutput(map[string]string{"Result": "Namespace not destroyed"}, "Namespace not destroyed", output)
}
},
}
var listUsersCmd = &cobra.Command{
var listNamespacesCmd = &cobra.Command{
Use: "list",
Short: "List all the users",
Short: "List all the namespaces",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
@@ -155,13 +157,13 @@ var listUsersCmd = &cobra.Command{
defer cancel()
defer conn.Close()
request := &v1.ListUsersRequest{}
request := &v1.ListNamespacesRequest{}
response, err := client.ListUsers(ctx, request)
response, err := client.ListNamespaces(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot get users: %s", status.Convert(err).Message()),
fmt.Sprintf("Cannot get namespaces: %s", status.Convert(err).Message()),
output,
)
@@ -169,19 +171,19 @@ var listUsersCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response.GetUsers(), "", output)
SuccessOutput(response.Namespaces, "", output)
return
}
tableData := pterm.TableData{{"ID", "Name", "Created"}}
for _, user := range response.GetUsers() {
for _, namespace := range response.GetNamespaces() {
tableData = append(
tableData,
[]string{
user.GetId(),
user.GetName(),
user.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
namespace.GetId(),
namespace.GetName(),
namespace.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
},
)
}
@@ -198,9 +200,9 @@ var listUsersCmd = &cobra.Command{
},
}
var renameUserCmd = &cobra.Command{
var renameNamespaceCmd = &cobra.Command{
Use: "rename OLD_NAME NEW_NAME",
Short: "Renames a user",
Short: "Renames a namespace",
Aliases: []string{"mv"},
Args: func(cmd *cobra.Command, args []string) error {
expectedArguments := 2
@@ -217,17 +219,17 @@ var renameUserCmd = &cobra.Command{
defer cancel()
defer conn.Close()
request := &v1.RenameUserRequest{
request := &v1.RenameNamespaceRequest{
OldName: args[0],
NewName: args[1],
}
response, err := client.RenameUser(ctx, request)
response, err := client.RenameNamespace(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot rename user: %s",
"Cannot rename namespace: %s",
status.Convert(err).Message(),
),
output,
@@ -236,6 +238,6 @@ var renameUserCmd = &cobra.Command{
return
}
SuccessOutput(response.GetUser(), "User renamed", output)
SuccessOutput(response.Namespace, "Namespace renamed", output)
},
}

View File

@@ -9,8 +9,8 @@ import (
"time"
survey "github.com/AlecAivazis/survey/v2"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
@@ -19,24 +19,12 @@ import (
func init() {
rootCmd.AddCommand(nodeCmd)
listNodesCmd.Flags().StringP("user", "u", "", "Filter by user")
listNodesCmd.Flags().StringP("namespace", "n", "", "Filter by namespace")
listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
listNodesCmd.Flags().StringP("namespace", "n", "", "User")
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
listNodesNamespaceFlag.Hidden = true
nodeCmd.AddCommand(listNodesCmd)
registerNodeCmd.Flags().StringP("user", "u", "", "User")
registerNodeCmd.Flags().StringP("namespace", "n", "", "User")
registerNodeNamespaceFlag := registerNodeCmd.Flags().Lookup("namespace")
registerNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
registerNodeNamespaceFlag.Hidden = true
err := registerNodeCmd.MarkFlagRequired("user")
registerNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
err := registerNodeCmd.MarkFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
@@ -75,14 +63,9 @@ func init() {
log.Fatalf(err.Error())
}
moveNodeCmd.Flags().StringP("user", "u", "", "New user")
moveNodeCmd.Flags().StringP("namespace", "n", "", "New namespace")
moveNodeCmd.Flags().StringP("namespace", "n", "", "User")
moveNodeNamespaceFlag := moveNodeCmd.Flags().Lookup("namespace")
moveNodeNamespaceFlag.Deprecated = deprecateNamespaceMessage
moveNodeNamespaceFlag.Hidden = true
err = moveNodeCmd.MarkFlagRequired("user")
err = moveNodeCmd.MarkFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
@@ -107,12 +90,12 @@ var nodeCmd = &cobra.Command{
var registerNodeCmd = &cobra.Command{
Use: "register",
Short: "Registers a node to your network",
Short: "Registers a machine to your network",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
return
}
@@ -132,17 +115,17 @@ var registerNodeCmd = &cobra.Command{
return
}
request := &v1.RegisterNodeRequest{
Key: machineKey,
User: user,
request := &v1.RegisterMachineRequest{
Key: machineKey,
Namespace: namespace,
}
response, err := client.RegisterNode(ctx, request)
response, err := client.RegisterMachine(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot register node: %s\n",
"Cannot register machine: %s\n",
status.Convert(err).Message(),
),
output,
@@ -152,8 +135,8 @@ var registerNodeCmd = &cobra.Command{
}
SuccessOutput(
response.GetNode(),
fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
response.Machine,
fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
},
}
@@ -163,9 +146,9 @@ var listNodesCmd = &cobra.Command{
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
return
}
@@ -180,11 +163,11 @@ var listNodesCmd = &cobra.Command{
defer cancel()
defer conn.Close()
request := &v1.ListNodesRequest{
User: user,
request := &v1.ListMachinesRequest{
Namespace: namespace,
}
response, err := client.ListNodes(ctx, request)
response, err := client.ListMachines(ctx, request)
if err != nil {
ErrorOutput(
err,
@@ -196,12 +179,12 @@ var listNodesCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response.GetNodes(), "", output)
SuccessOutput(response.Machines, "", output)
return
}
tableData, err := nodesToPtables(user, showTags, response.GetNodes())
tableData, err := nodesToPtables(namespace, showTags, response.Machines)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
@@ -223,7 +206,7 @@ var listNodesCmd = &cobra.Command{
var expireNodeCmd = &cobra.Command{
Use: "expire",
Short: "Expire (log out) a node in your network",
Short: "Expire (log out) a machine in your network",
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
Aliases: []string{"logout", "exp", "e"},
Run: func(cmd *cobra.Command, args []string) {
@@ -244,16 +227,16 @@ var expireNodeCmd = &cobra.Command{
defer cancel()
defer conn.Close()
request := &v1.ExpireNodeRequest{
NodeId: identifier,
request := &v1.ExpireMachineRequest{
MachineId: identifier,
}
response, err := client.ExpireNode(ctx, request)
response, err := client.ExpireMachine(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot expire node: %s\n",
"Cannot expire machine: %s\n",
status.Convert(err).Message(),
),
output,
@@ -262,13 +245,13 @@ var expireNodeCmd = &cobra.Command{
return
}
SuccessOutput(response.GetNode(), "Node expired", output)
SuccessOutput(response.Machine, "Machine expired", output)
},
}
var renameNodeCmd = &cobra.Command{
Use: "rename NEW_NAME",
Short: "Renames a node in your network",
Short: "Renames a machine in your network",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
@@ -291,17 +274,17 @@ var renameNodeCmd = &cobra.Command{
if len(args) > 0 {
newName = args[0]
}
request := &v1.RenameNodeRequest{
NodeId: identifier,
NewName: newName,
request := &v1.RenameMachineRequest{
MachineId: identifier,
NewName: newName,
}
response, err := client.RenameNode(ctx, request)
response, err := client.RenameMachine(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot rename node: %s\n",
"Cannot rename machine: %s\n",
status.Convert(err).Message(),
),
output,
@@ -310,7 +293,7 @@ var renameNodeCmd = &cobra.Command{
return
}
SuccessOutput(response.GetNode(), "Node renamed", output)
SuccessOutput(response.Machine, "Machine renamed", output)
},
}
@@ -336,11 +319,11 @@ var deleteNodeCmd = &cobra.Command{
defer cancel()
defer conn.Close()
getRequest := &v1.GetNodeRequest{
NodeId: identifier,
getRequest := &v1.GetMachineRequest{
MachineId: identifier,
}
getResponse, err := client.GetNode(ctx, getRequest)
getResponse, err := client.GetMachine(ctx, getRequest)
if err != nil {
ErrorOutput(
err,
@@ -354,8 +337,8 @@ var deleteNodeCmd = &cobra.Command{
return
}
deleteRequest := &v1.DeleteNodeRequest{
NodeId: identifier,
deleteRequest := &v1.DeleteMachineRequest{
MachineId: identifier,
}
confirm := false
@@ -364,7 +347,7 @@ var deleteNodeCmd = &cobra.Command{
prompt := &survey.Confirm{
Message: fmt.Sprintf(
"Do you want to remove the node %s?",
getResponse.GetNode().GetName(),
getResponse.GetMachine().Name,
),
}
err = survey.AskOne(prompt, &confirm)
@@ -374,7 +357,7 @@ var deleteNodeCmd = &cobra.Command{
}
if confirm || force {
response, err := client.DeleteNode(ctx, deleteRequest)
response, err := client.DeleteMachine(ctx, deleteRequest)
if output != "" {
SuccessOutput(response, "", output)
@@ -405,7 +388,7 @@ var deleteNodeCmd = &cobra.Command{
var moveNodeCmd = &cobra.Command{
Use: "move",
Short: "Move node to another user",
Short: "Move node to another namespace",
Aliases: []string{"mv"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
@@ -421,11 +404,11 @@ var moveNodeCmd = &cobra.Command{
return
}
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting user: %s", err),
fmt.Sprintf("Error getting namespace: %s", err),
output,
)
@@ -436,11 +419,11 @@ var moveNodeCmd = &cobra.Command{
defer cancel()
defer conn.Close()
getRequest := &v1.GetNodeRequest{
NodeId: identifier,
getRequest := &v1.GetMachineRequest{
MachineId: identifier,
}
_, err = client.GetNode(ctx, getRequest)
_, err = client.GetMachine(ctx, getRequest)
if err != nil {
ErrorOutput(
err,
@@ -454,12 +437,12 @@ var moveNodeCmd = &cobra.Command{
return
}
moveRequest := &v1.MoveNodeRequest{
NodeId: identifier,
User: user,
moveRequest := &v1.MoveMachineRequest{
MachineId: identifier,
Namespace: namespace,
}
moveResponse, err := client.MoveNode(ctx, moveRequest)
moveResponse, err := client.MoveMachine(ctx, moveRequest)
if err != nil {
ErrorOutput(
err,
@@ -473,27 +456,25 @@ var moveNodeCmd = &cobra.Command{
return
}
SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
SuccessOutput(moveResponse.Machine, "Node moved to another namespace", output)
},
}
func nodesToPtables(
currentUser string,
currentNamespace string,
showTags bool,
nodes []*v1.Node,
machines []*v1.Machine,
) (pterm.TableData, error) {
tableHeader := []string{
"ID",
"Hostname",
"Name",
"MachineKey",
"NodeKey",
"User",
"Namespace",
"IP addresses",
"Ephemeral",
"Last seen",
"Expiration",
"Connected",
"Online",
"Expired",
}
if showTags {
@@ -505,46 +486,36 @@ func nodesToPtables(
}
tableData := pterm.TableData{tableHeader}
for _, node := range nodes {
for _, machine := range machines {
var ephemeral bool
if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {
if machine.PreAuthKey != nil && machine.PreAuthKey.Ephemeral {
ephemeral = true
}
var lastSeen time.Time
var lastSeenTime string
if node.GetLastSeen() != nil {
lastSeen = node.GetLastSeen().AsTime()
if machine.LastSeen != nil {
lastSeen = machine.LastSeen.AsTime()
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
}
var expiry time.Time
var expiryTime string
if node.GetExpiry() != nil {
expiry = node.GetExpiry().AsTime()
expiryTime = expiry.Format("2006-01-02 15:04:05")
} else {
expiryTime = "N/A"
}
var machineKey key.MachinePublic
err := machineKey.UnmarshalText(
[]byte(node.GetMachineKey()),
)
if err != nil {
machineKey = key.MachinePublic{}
if machine.Expiry != nil {
expiry = machine.Expiry.AsTime()
}
var nodeKey key.NodePublic
err = nodeKey.UnmarshalText(
[]byte(node.GetNodeKey()),
err := nodeKey.UnmarshalText(
[]byte(headscale.NodePublicKeyEnsurePrefix(machine.NodeKey)),
)
if err != nil {
return nil, err
}
var online string
if node.GetOnline() {
if lastSeen.After(
time.Now().Add(-5 * time.Minute),
) { // TODO: Find a better way to reliably show if online
online = pterm.LightGreen("online")
} else {
online = pterm.LightRed("offline")
@@ -558,36 +529,36 @@ func nodesToPtables(
}
var forcedTags string
for _, tag := range node.GetForcedTags() {
for _, tag := range machine.ForcedTags {
forcedTags += "," + tag
}
forcedTags = strings.TrimLeft(forcedTags, ",")
var invalidTags string
for _, tag := range node.GetInvalidTags() {
if !contains(node.GetForcedTags(), tag) {
for _, tag := range machine.InvalidTags {
if !contains(machine.ForcedTags, tag) {
invalidTags += "," + pterm.LightRed(tag)
}
}
invalidTags = strings.TrimLeft(invalidTags, ",")
var validTags string
for _, tag := range node.GetValidTags() {
if !contains(node.GetForcedTags(), tag) {
for _, tag := range machine.ValidTags {
if !contains(machine.ForcedTags, tag) {
validTags += "," + pterm.LightGreen(tag)
}
}
validTags = strings.TrimLeft(validTags, ",")
var user string
if currentUser == "" || (currentUser == node.GetUser().GetName()) {
user = pterm.LightMagenta(node.GetUser().GetName())
var namespace string
if currentNamespace == "" || (currentNamespace == machine.Namespace.Name) {
namespace = pterm.LightMagenta(machine.Namespace.Name)
} else {
// Shared into this user
user = pterm.LightYellow(node.GetUser().GetName())
// Shared into this namespace
namespace = pterm.LightYellow(machine.Namespace.Name)
}
var IPV4Address string
var IPV6Address string
for _, addr := range node.GetIpAddresses() {
for _, addr := range machine.IpAddresses {
if netip.MustParseAddr(addr).Is4() {
IPV4Address = addr
} else {
@@ -596,16 +567,14 @@ func nodesToPtables(
}
nodeData := []string{
strconv.FormatUint(node.GetId(), util.Base10),
node.GetName(),
node.GetGivenName(),
machineKey.ShortString(),
strconv.FormatUint(machine.Id, headscale.Base10),
machine.Name,
machine.GetGivenName(),
nodeKey.ShortString(),
user,
namespace,
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
strconv.FormatBool(ephemeral),
lastSeenTime,
expiryTime,
online,
expired,
}
@@ -646,17 +615,17 @@ var tagCmd = &cobra.Command{
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error retrieving list of tags to add to node, %v", err),
fmt.Sprintf("Error retrieving list of tags to add to machine, %v", err),
output,
)
return
}
// Sending tags to node
// Sending tags to machine
request := &v1.SetTagsRequest{
NodeId: identifier,
Tags: tagsToSet,
MachineId: identifier,
Tags: tagsToSet,
}
resp, err := client.SetTags(ctx, request)
if err != nil {
@@ -671,8 +640,8 @@ var tagCmd = &cobra.Command{
if resp != nil {
SuccessOutput(
resp.GetNode(),
"Node updated",
resp.GetMachine(),
"Machine updated",
output,
)
}
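All of the node subcommands in this file follow the same shape: obtain a gRPC client, build a v1 request, and hand the response to SuccessOutput or a table helper. A hedged sketch of that flow for listing machines, assuming the getHeadscaleCLIClient helper and v1/fmt imports used elsewhere in this file (error rendering simplified):

func listMachinesExample(namespace string) error {
    // getHeadscaleCLIClient returns a context, a v1.HeadscaleServiceClient,
    // the underlying gRPC connection and a cancel func, as in the commands above.
    ctx, client, conn, cancel := getHeadscaleCLIClient()
    defer cancel()
    defer conn.Close()

    response, err := client.ListMachines(ctx, &v1.ListMachinesRequest{
        Namespace: namespace,
    })
    if err != nil {
        return fmt.Errorf("cannot list machines: %w", err)
    }

    for _, machine := range response.Machines {
        fmt.Printf("%d\t%s\t%s\n", machine.Id, machine.Name, machine.GetGivenName())
    }

    return nil
}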

View File

@@ -20,14 +20,8 @@ const (
func init() {
rootCmd.AddCommand(preauthkeysCmd)
preauthkeysCmd.PersistentFlags().StringP("user", "u", "", "User")
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
pakNamespaceFlag.Deprecated = deprecateNamespaceMessage
pakNamespaceFlag.Hidden = true
err := preauthkeysCmd.MarkPersistentFlagRequired("user")
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err := preauthkeysCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatal().Err(err).Msg("")
}
@@ -52,14 +46,14 @@ var preauthkeysCmd = &cobra.Command{
var listPreAuthKeys = &cobra.Command{
Use: "list",
Short: "List the preauthkeys for this user",
Short: "List the preauthkeys for this namespace",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
return
}
@@ -69,7 +63,7 @@ var listPreAuthKeys = &cobra.Command{
defer conn.Close()
request := &v1.ListPreAuthKeysRequest{
User: user,
Namespace: namespace,
}
response, err := client.ListPreAuthKeys(ctx, request)
@@ -84,7 +78,7 @@ var listPreAuthKeys = &cobra.Command{
}
if output != "" {
SuccessOutput(response.GetPreAuthKeys(), "", output)
SuccessOutput(response.PreAuthKeys, "", output)
return
}
@@ -101,10 +95,10 @@ var listPreAuthKeys = &cobra.Command{
"Tags",
},
}
for _, key := range response.GetPreAuthKeys() {
for _, key := range response.PreAuthKeys {
expiration := "-"
if key.GetExpiration() != nil {
expiration = ColourTime(key.GetExpiration().AsTime())
expiration = ColourTime(key.Expiration.AsTime())
}
var reusable string
@@ -116,7 +110,7 @@ var listPreAuthKeys = &cobra.Command{
aclTags := ""
for _, tag := range key.GetAclTags() {
for _, tag := range key.AclTags {
aclTags += "," + tag
}
@@ -149,14 +143,14 @@ var listPreAuthKeys = &cobra.Command{
var createPreAuthKeyCmd = &cobra.Command{
Use: "create",
Short: "Creates a new preauthkey in the specified user",
Short: "Creates a new preauthkey in the specified namespace",
Aliases: []string{"c", "new"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
return
}
@@ -168,11 +162,11 @@ var createPreAuthKeyCmd = &cobra.Command{
log.Trace().
Bool("reusable", reusable).
Bool("ephemeral", ephemeral).
Str("user", user).
Str("namespace", namespace).
Msg("Preparing to create preauthkey")
request := &v1.CreatePreAuthKeyRequest{
User: user,
Namespace: namespace,
Reusable: reusable,
Ephemeral: ephemeral,
AclTags: tags,
@@ -214,7 +208,7 @@ var createPreAuthKeyCmd = &cobra.Command{
return
}
SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
},
}
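Putting the createPreAuthKeyCmd pieces together, a hedged sketch of creating a key over the same gRPC API (the namespace, tag and flag values are made up, the expiration handling is omitted, and the RPC name is inferred from the request message shown above):

func createPreAuthKeyExample(ctx context.Context, client v1.HeadscaleServiceClient) (string, error) {
    response, err := client.CreatePreAuthKey(ctx, &v1.CreatePreAuthKeyRequest{
        Namespace: "mynamespace",
        Reusable:  true,
        Ephemeral: false,
        AclTags:   []string{"tag:example"},
    })
    if err != nil {
        return "", fmt.Errorf("cannot create pre-auth key: %w", err)
    }

    // The CLI prints the key itself as the success message; return it here.
    return response.PreAuthKey.Key, nil
}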
@@ -231,9 +225,9 @@ var expirePreAuthKeyCmd = &cobra.Command{
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetString("user")
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
return
}
@@ -243,8 +237,8 @@ var expirePreAuthKeyCmd = &cobra.Command{
defer conn.Close()
request := &v1.ExpirePreAuthKeyRequest{
User: user,
Key: args[0],
Namespace: namespace,
Key: args[0],
}
response, err := client.ExpirePreAuthKey(ctx, request)

View File

@@ -5,22 +5,17 @@ import (
"os"
"runtime"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/tcnksm/go-latest"
)
const (
deprecateNamespaceMessage = "use --user"
)
var cfgFile string = ""
func init() {
if len(os.Args) > 1 &&
(os.Args[1] == "version" || os.Args[1] == "mockoidc" || os.Args[1] == "completion") {
if len(os.Args) > 1 && (os.Args[1] == "version" || os.Args[1] == "mockoidc" || os.Args[1] == "completion") {
return
}
@@ -38,33 +33,33 @@ func initConfig() {
cfgFile = os.Getenv("HEADSCALE_CONFIG")
}
if cfgFile != "" {
err := types.LoadConfig(cfgFile, true)
err := headscale.LoadConfig(cfgFile, true)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
}
} else {
err := types.LoadConfig("", false)
err := headscale.LoadConfig("", false)
if err != nil {
log.Fatal().Caller().Err(err).Msgf("Error loading config")
}
}
cfg, err := types.GetHeadscaleConfig()
cfg, err := headscale.GetHeadscaleConfig()
if err != nil {
log.Fatal().Caller().Err(err).Msg("Failed to get headscale configuration")
log.Fatal().Caller().Err(err)
}
machineOutput := HasMachineOutputFlag()
zerolog.SetGlobalLevel(cfg.Log.Level)
// If the user has requested a "node" readable format,
// If the user has requested a "machine" readable format,
// then disable login so the output remains valid.
if machineOutput {
zerolog.SetGlobalLevel(zerolog.Disabled)
}
if cfg.Log.Format == types.JSONLogFormat {
if cfg.Log.Format == headscale.JSONLogFormat {
log.Logger = log.Output(os.Stdout)
}

View File

@@ -3,45 +3,37 @@ package cli
import (
"fmt"
"log"
"net/netip"
"strconv"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
)
const (
Base10 = 10
)
func init() {
rootCmd.AddCommand(routesCmd)
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
err := listRoutesCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
}
routesCmd.AddCommand(listRoutesCmd)
enableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
err := enableRouteCmd.MarkFlagRequired("route")
enableRouteCmd.Flags().
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to enable")
enableRouteCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
enableRouteCmd.Flags().BoolP("all", "a", false, "All routes from host")
err = enableRouteCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatalf(err.Error())
}
routesCmd.AddCommand(enableRouteCmd)
disableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
err = disableRouteCmd.MarkFlagRequired("route")
if err != nil {
log.Fatalf(err.Error())
}
routesCmd.AddCommand(disableRouteCmd)
deleteRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
err = deleteRouteCmd.MarkFlagRequired("route")
if err != nil {
log.Fatalf(err.Error())
}
routesCmd.AddCommand(deleteRouteCmd)
nodeCmd.AddCommand(routesCmd)
}
var routesCmd = &cobra.Command{
@@ -52,7 +44,7 @@ var routesCmd = &cobra.Command{
var listRoutesCmd = &cobra.Command{
Use: "list",
Short: "List all routes",
Short: "List routes advertised and enabled by a given node",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
@@ -72,51 +64,28 @@ var listRoutesCmd = &cobra.Command{
defer cancel()
defer conn.Close()
var routes []*v1.Route
if machineID == 0 {
response, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
output,
)
return
}
if output != "" {
SuccessOutput(response.GetRoutes(), "", output)
return
}
routes = response.GetRoutes()
} else {
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
NodeId: machineID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot get routes for node %d: %s", machineID, status.Convert(err).Message()),
output,
)
return
}
if output != "" {
SuccessOutput(response.GetRoutes(), "", output)
return
}
routes = response.GetRoutes()
request := &v1.GetMachineRouteRequest{
MachineId: machineID,
}
tableData := routesToPtables(routes)
response, err := client.GetMachineRoute(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
output,
)
return
}
if output != "" {
SuccessOutput(response.Routes, "", output)
return
}
tableData := routesToPtables(response.Routes)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
@@ -138,12 +107,16 @@ var listRoutesCmd = &cobra.Command{
var enableRouteCmd = &cobra.Command{
Use: "enable",
Short: "Set a route as enabled",
Long: `This command will make as enabled a given route.`,
Short: "Set the enabled routes for a given node",
Long: `This command will take a list of routes that will _replace_
the current set of routes on a given node.
If you would like to disable a route, simply run the command again, but
omit the route you do not want to enable.
`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
routeID, err := cmd.Flags().GetUint64("route")
machineID, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
@@ -158,13 +131,52 @@ var enableRouteCmd = &cobra.Command{
defer cancel()
defer conn.Close()
response, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{
RouteId: routeID,
})
var routes []string
isAll, _ := cmd.Flags().GetBool("all")
if isAll {
response, err := client.GetMachineRoute(ctx, &v1.GetMachineRouteRequest{
MachineId: machineID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf(
"Cannot get machine routes: %s\n",
status.Convert(err).Message(),
),
output,
)
return
}
routes = response.GetRoutes().GetAdvertisedRoutes()
} else {
routes, err = cmd.Flags().GetStringSlice("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting routes from flag: %s", err),
output,
)
return
}
}
request := &v1.EnableMachineRoutesRequest{
MachineId: machineID,
Routes: routes,
}
response, err := client.EnableMachineRoutes(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()),
fmt.Sprintf(
"Cannot register machine: %s\n",
status.Convert(err).Message(),
),
output,
)
@@ -172,127 +184,50 @@ var enableRouteCmd = &cobra.Command{
}
if output != "" {
SuccessOutput(response, "", output)
SuccessOutput(response.Routes, "", output)
return
}
},
}
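Because the route list in EnableMachineRoutesRequest replaces whatever was previously enabled, disabling a route is just re-sending the list without it, exactly as the help text above explains. A hedged sketch of a single call (machine ID and prefix are made-up values; client setup as in the other commands):

func enableOnlyOneRouteExample(ctx context.Context, client v1.HeadscaleServiceClient) error {
    // Enable only 10.0.0.0/24 on machine 3; any other route that was enabled
    // before this call becomes disabled, because the list is a full replacement.
    _, err := client.EnableMachineRoutes(ctx, &v1.EnableMachineRoutesRequest{
        MachineId: 3,
        Routes:    []string{"10.0.0.0/24"},
    })
    if err != nil {
        return fmt.Errorf("cannot enable routes: %w", err)
    }

    return nil
}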
var disableRouteCmd = &cobra.Command{
Use: "disable",
Short: "Set as disabled a given route",
Long: `This command will make as disabled a given route.`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
tableData := routesToPtables(response.Routes)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
routeID, err := cmd.Flags().GetUint64("route")
return
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine id from flag: %s", err),
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
return
}
ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()
response, err := client.DisableRoute(ctx, &v1.DisableRouteRequest{
RouteId: routeID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot disable route %d: %s", routeID, status.Convert(err).Message()),
output,
)
return
}
if output != "" {
SuccessOutput(response, "", output)
return
}
},
}
var deleteRouteCmd = &cobra.Command{
Use: "delete",
Short: "Delete a given route",
Long: `This command will delete a given route.`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
routeID, err := cmd.Flags().GetUint64("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine id from flag: %s", err),
output,
)
return
}
ctx, client, conn, cancel := getHeadscaleCLIClient()
defer cancel()
defer conn.Close()
response, err := client.DeleteRoute(ctx, &v1.DeleteRouteRequest{
RouteId: routeID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete route %d: %s", routeID, status.Convert(err).Message()),
output,
)
return
}
if output != "" {
SuccessOutput(response, "", output)
return
}
},
}
// routesToPtables converts the list of routes to a nice table.
func routesToPtables(routes []*v1.Route) pterm.TableData {
tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}}
func routesToPtables(routes *v1.Routes) pterm.TableData {
tableData := pterm.TableData{{"Route", "Enabled"}}
for _, route := range routes {
var isPrimaryStr string
prefix, err := netip.ParsePrefix(route.GetPrefix())
if err != nil {
log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
for _, route := range routes.GetAdvertisedRoutes() {
enabled := isStringInSlice(routes.EnabledRoutes, route)
continue
}
if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 {
isPrimaryStr = "-"
} else {
isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
}
tableData = append(tableData,
[]string{
strconv.FormatUint(route.GetId(), Base10),
route.GetNode().GetGivenName(),
route.GetPrefix(),
strconv.FormatBool(route.GetAdvertised()),
strconv.FormatBool(route.GetEnabled()),
isPrimaryStr,
})
tableData = append(tableData, []string{route, strconv.FormatBool(enabled)})
}
return tableData
}
func isStringInSlice(strs []string, s string) bool {
for _, s2 := range strs {
if s == s2 {
return true
}
}
return false
}
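For example, given AdvertisedRoutes of ["10.0.0.0/24", "0.0.0.0/0"] and EnabledRoutes of ["10.0.0.0/24"] (illustrative values), routesToPtables produces a table that renders roughly as:

Route          Enabled
10.0.0.0/24    true
0.0.0.0/0      false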

View File

@@ -8,16 +8,13 @@ import (
"os"
"reflect"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
)
const (
@@ -25,8 +22,8 @@ const (
SocketWritePermissions = 0o666
)
func getHeadscaleApp() (*hscontrol.Headscale, error) {
cfg, err := types.GetHeadscaleConfig()
func getHeadscaleApp() (*headscale.Headscale, error) {
cfg, err := headscale.GetHeadscaleConfig()
if err != nil {
return nil, fmt.Errorf(
"failed to load configuration while creating headscale instance: %w",
@@ -34,7 +31,7 @@ func getHeadscaleApp() (*hscontrol.Headscale, error) {
)
}
app, err := hscontrol.NewHeadscale(cfg)
app, err := headscale.NewHeadscale(cfg)
if err != nil {
return nil, err
}
@@ -42,23 +39,21 @@ func getHeadscaleApp() (*hscontrol.Headscale, error) {
// We are doing this here, as in the future could be cool to have it also hot-reload
if cfg.ACL.PolicyPath != "" {
aclPath := util.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath)
pol, err := policy.LoadACLPolicyFromPath(aclPath)
aclPath := headscale.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath)
err = app.LoadACLPolicy(aclPath)
if err != nil {
log.Fatal().
Str("path", aclPath).
Err(err).
Msg("Could not load the ACL policy")
}
app.ACLPolicy = pol
}
return app, nil
}
func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
cfg, err := types.GetHeadscaleConfig()
cfg, err := headscale.GetHeadscaleConfig()
if err != nil {
log.Fatal().
Err(err).
@@ -79,7 +74,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
address := cfg.CLI.Address
// If the address is not set, we assume that we are on the server hosting hscontrol.
// If the address is not set, we assume that we are on the server hosting headscale.
if address == "" {
log.Debug().
Str("socket", cfg.UnixSocket).
@@ -103,7 +98,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
grpcOptions = append(
grpcOptions,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithContextDialer(util.GrpcSocketDialer),
grpc.WithContextDialer(headscale.GrpcSocketDialer),
)
} else {
// If we are not connecting to a local server, require an API key for authentication
@@ -154,17 +149,17 @@ func SuccessOutput(result interface{}, override string, outputFormat string) {
case "json":
jsonBytes, err = json.MarshalIndent(result, "", "\t")
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
log.Fatal().Err(err)
}
case "json-line":
jsonBytes, err = json.Marshal(result)
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
log.Fatal().Err(err)
}
case "yaml":
jsonBytes, err = yaml.Marshal(result)
if err != nil {
log.Fatal().Err(err).Msg("failed to unmarshal output")
log.Fatal().Err(err)
}
default:
//nolint
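SuccessOutput, shown partially above, picks the serialisation based on the --output flag. A hedged, standalone sketch of that selection logic (not the project's exact function; it assumes the same encoding/json, gopkg.in/yaml.v2 and fmt imports as this file):

func renderOutput(result interface{}, outputFormat string) ([]byte, error) {
    switch outputFormat {
    case "json":
        return json.MarshalIndent(result, "", "\t")
    case "json-line":
        return json.Marshal(result)
    case "yaml":
        return yaml.Marshal(result)
    default:
        // The real SuccessOutput falls back to a human-readable override string here.
        return []byte(fmt.Sprintf("%v", result)), nil
    }
}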

View File

@@ -34,7 +34,7 @@ func main() {
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
log.Logger = log.Output(zerolog.ConsoleWriter{
Out: os.Stderr,
Out: os.Stdout,
TimeFormat: time.RFC3339,
NoColor: !colors,
})

View File

@@ -7,8 +7,7 @@ import (
"strings"
"testing"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale"
"github.com/spf13/viper"
"gopkg.in/check.v1"
)
@@ -51,23 +50,21 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
}
// Load example config, it should load without validation errors
err = types.LoadConfig(cfgFile, true)
err = headscale.LoadConfig(cfgFile, true)
c.Assert(err, check.IsNil)
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
c.Assert(
util.GetFileMode("unix_socket_permission"),
headscale.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
@@ -96,21 +93,21 @@ func (*Suite) TestConfigLoading(c *check.C) {
}
// Load example config, it should load without validation errors
err = types.LoadConfig(tmpDir, false)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
c.Assert(
util.GetFileMode("unix_socket_permission"),
headscale.GetFileMode("unix_socket_permission"),
check.Equals,
fs.FileMode(0o770),
)
@@ -140,10 +137,10 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
}
// Load example config, it should load without validation errors
err = types.LoadConfig(tmpDir, false)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)
dnsConfig, baseDomain := types.GetDNSConfig()
dnsConfig, baseDomain := headscale.GetDNSConfig()
c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
@@ -175,7 +172,7 @@ noise:
writeConfig(c, tmpDir, configYaml)
// Check configuration validation errors (1)
err = types.LoadConfig(tmpDir, false)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.NotNil)
// check.Matches can not handle multiline strings
tmp := strings.ReplaceAll(err.Error(), "\n", "***")
@@ -204,6 +201,6 @@ tls_letsencrypt_hostname: example.com
tls_letsencrypt_challenge_type: TLS-ALPN-01
`)
writeConfig(c, tmpDir, configYaml)
err = types.LoadConfig(tmpDir, false)
err = headscale.LoadConfig(tmpDir, false)
c.Assert(err, check.IsNil)
}

View File

@@ -40,23 +40,29 @@ grpc_listen_addr: 127.0.0.1:50443
# are doing.
grpc_allow_insecure: false
# Private key used to encrypt the traffic between headscale
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
# For production:
# /var/lib/headscale/private.key
private_key_path: ./private.key
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol.
private_key_path: /var/lib/headscale/noise_private.key
# using the new Noise-based protocol. It must be different
# from the legacy private key.
#
# For production:
# private_key_path: /var/lib/headscale/noise_private.key
private_key_path: ./noise_private.key
# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
# and the associated prefix length, delimited by a slash.
# It must be within IP ranges supported by the Tailscale
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
# See below:
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
# Any other range is NOT supported, and it will cause unexpected issues.
ip_prefixes:
- fd7a:115c:a1e0::/48
- 100.64.0.0/10
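The comments above note that ip_prefixes must be subnets of 100.64.0.0/10 (IPv4) or fd7a:115c:a1e0::/48 (IPv6). A hedged, standalone sketch of that range check for the IPv4 case, using the same libraries the Go config code in this diff uses for it (go4.org/netipx and tailscale.com/net/tsaddr):

package main

import (
    "fmt"
    "net/netip"

    "go4.org/netipx"
    "tailscale.com/net/tsaddr"
)

func main() {
    prefix := netip.MustParsePrefix("100.64.0.0/10")

    // Build an IP set containing the Tailscale CGNAT range and test membership.
    var builder netipx.IPSetBuilder
    builder.AddPrefix(tsaddr.CGNATRange())
    ipSet, _ := builder.IPSet()

    if ipSet.ContainsPrefix(prefix) {
        fmt.Println("prefix is inside the supported CGNAT range")
    } else {
        fmt.Println("unsupported prefix: Tailscale clients expect IPv4 addresses from 100.64.0.0/10")
    }
}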
@@ -88,22 +94,6 @@ derp:
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
# it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
# If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
automatically_add_embedded_derp_region: true
# For better connection stability (especially when using an Exit-Node and DNS is not working),
# it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
ipv4: 1.2.3.4
ipv6: 2001:db8::1
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
@@ -138,28 +128,25 @@ ephemeral_node_inactivity_timeout: 30m
# In case of doubts, do not touch the default 10s.
node_update_check_interval: 10s
database:
type: sqlite
# SQLite config
db_type: sqlite3
# SQLite config
sqlite:
path: /var/lib/headscale/db.sqlite
# For production:
# db_path: /var/lib/headscale/db.sqlite
db_path: ./db.sqlite
# # Postgres config
# postgres:
# # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
# host: localhost
# port: 5432
# name: headscale
# user: foo
# pass: bar
# max_open_conns: 10
# max_idle_conns: 10
# conn_max_idle_time_secs: 3600
# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
# db_type: postgres
# db_host: localhost
# db_port: 5432
# db_name: headscale
# db_user: foo
# db_pass: bar
# # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# # in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# ssl: false
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
# db_ssl: false
### TLS configuration
#
@@ -180,7 +167,8 @@ tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
tls_letsencrypt_cache_dir: ./cache
# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
@@ -247,17 +235,6 @@ dns_config:
# Search domains to inject.
domains: []
# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
# - name: "grafana.myvpn.example.com"
# type: "A"
# value: "100.64.0.3"
#
# # you can also put it in one line
# - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
@@ -265,12 +242,13 @@ dns_config:
# Defines the base domain to create the hostnames for MagicDNS.
# `base_domain` must be a FQDNs, without the trailing dot.
# The FQDN of the hosts will be
# `hostname.user.base_domain` (e.g., _myhost.myuser.example.com_).
# `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
base_domain: example.com
# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
unix_socket: /var/run/headscale/headscale.sock
# unix_socket: /var/run/headscale.sock
unix_socket: ./headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
@@ -282,45 +260,26 @@ unix_socket_permission: "0770"
# issuer: "https://your-oidc.issuer.com/path"
# client_id: "your-oidc-client-id"
# client_secret: "your-oidc-client-secret"
# # Alternatively, set `client_secret_path` to read the secret from the file.
# # It resolves environment variables, making integration to systemd's
# # `LoadCredential` straightforward:
# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
# # client_secret and client_secret_path are mutually exclusive.
#
# # The amount of time from a node is authenticated with OpenID until it
# # expires and needs to reauthenticate.
# # Setting the value to "0" will mean no expiry.
# expiry: 180d
#
# # Use the expiry from the token received from OpenID when the user logged
# # in, this will typically lead to frequent need to reauthenticate and should
# # only been enabled if you know what you are doing.
# # Note: enabling this will cause `oidc.expiry` to be ignored.
# use_expiry_from_token: false
#
# # Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
# # parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
# Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
# parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
#
# scope: ["openid", "profile", "email", "custom"]
# extra_params:
# domain_hint: example.com
#
# # List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# # authentication request will be rejected.
# List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
# authentication request will be rejected.
#
# allowed_domains:
# - example.com
# # Note: Groups from keycloak have a leading '/'
# allowed_groups:
# - /headscale
# allowed_users:
# - alice@example.com
#
# # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# # This will transform `first-name.last-name@example.com` to the user `first-name.last-name`
# # If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
# user: `first-name.last-name.example.com`
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# This will transform `first-name.last-name@example.com` to the namespace `first-name.last-name`
# If `strip_email_domain` is set to `false` the domain part will NOT be removed resulting to the following
# namespace: `first-name.last-name.example.com`
#
# strip_email_domain: true
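A hedged illustration of the strip_email_domain behaviour described above (this only encodes the transformation the comment describes, not headscale's implementation; it assumes a "strings" import):

// strip_email_domain: true   first-name.last-name@example.com -> first-name.last-name
// strip_email_domain: false  first-name.last-name@example.com -> first-name.last-name.example.com
func namespaceFromEmail(email string, stripDomain bool) string {
    at := strings.Index(email, "@")
    if at < 0 {
        return email
    }
    if stripDomain {
        return email[:at]
    }
    return email[:at] + "." + email[at+1:]
}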

View File

@@ -1,4 +1,4 @@
package types
package headscale
import (
"errors"
@@ -6,30 +6,24 @@ import (
"io/fs"
"net/netip"
"net/url"
"os"
"strings"
"time"
"github.com/coreos/go-oidc/v3/oidc"
"github.com/prometheus/common/model"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
"go4.org/netipx"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"github.com/juanfont/headscale/hscontrol/util"
)
const (
defaultOIDCExpiryTime = 180 * 24 * time.Hour // 180 Days
maxDuration time.Duration = 1<<63 - 1
)
tlsALPN01ChallengeType = "TLS-ALPN-01"
http01ChallengeType = "HTTP-01"
var errOidcMutuallyExclusive = errors.New(
"oidc_client_secret and oidc_client_secret_path are mutually exclusive",
JSONLogFormat = "json"
TextLogFormat = "text"
)
// Config contains the initial Headscale configuration.
@@ -42,15 +36,23 @@ type Config struct {
EphemeralNodeInactivityTimeout time.Duration
NodeUpdateCheckInterval time.Duration
IPPrefixes []netip.Prefix
PrivateKeyPath string
NoisePrivateKeyPath string
BaseDomain string
Log LogConfig
DisableUpdateCheck bool
Database DatabaseConfig
DERP DERPConfig
DBtype string
DBpath string
DBhost string
DBport int
DBname string
DBuser string
DBpass string
DBssl string
TLS TLSConfig
ACMEURL string
@@ -71,31 +73,6 @@ type Config struct {
ACL ACLConfig
}
type SqliteConfig struct {
Path string
}
type PostgresConfig struct {
Host string
Port int
Name string
User string
Pass string
Ssl string
MaxOpenConnections int
MaxIdleConnections int
ConnMaxIdleTimeSecs int
}
type DatabaseConfig struct {
// Type sets the database type, either "sqlite3" or "postgres"
Type string
Debug bool
Sqlite SqliteConfig
Postgres PostgresConfig
}
type TLSConfig struct {
CertPath string
KeyPath string
@@ -119,26 +96,19 @@ type OIDCConfig struct {
ExtraParams map[string]string
AllowedDomains []string
AllowedUsers []string
AllowedGroups []string
StripEmaildomain bool
Expiry time.Duration
UseExpiryFromToken bool
}
type DERPConfig struct {
ServerEnabled bool
AutomaticallyAddEmbeddedDerpRegion bool
ServerRegionID int
ServerRegionCode string
ServerRegionName string
ServerPrivateKeyPath string
STUNAddr string
URLs []url.URL
Paths []string
AutoUpdate bool
UpdateFrequency time.Duration
IPv4 string
IPv6 string
ServerEnabled bool
ServerRegionID int
ServerRegionCode string
ServerRegionName string
STUNAddr string
URLs []url.URL
Paths []string
AutoUpdate bool
UpdateFrequency time.Duration
}
type LogTailConfig struct {
@@ -180,21 +150,8 @@ func LoadConfig(path string, isFile bool) error {
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()
viper.RegisterAlias("db_type", "database.type")
// SQLite aliases
viper.RegisterAlias("db_path", "database.sqlite.path")
// Postgres aliases
viper.RegisterAlias("db_host", "database.postgres.host")
viper.RegisterAlias("db_port", "database.postgres.port")
viper.RegisterAlias("db_name", "database.postgres.name")
viper.RegisterAlias("db_user", "database.postgres.user")
viper.RegisterAlias("db_pass", "database.postgres.pass")
viper.RegisterAlias("db_ssl", "database.postgres.ssl")
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
viper.SetDefault("tls_letsencrypt_challenge_type", HTTP01ChallengeType)
viper.SetDefault("tls_letsencrypt_challenge_type", http01ChallengeType)
viper.SetDefault("log.level", "info")
viper.SetDefault("log.format", TextLogFormat)
@@ -204,9 +161,8 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("derp.server.enabled", false)
viper.SetDefault("derp.server.stun.enabled", true)
viper.SetDefault("derp.server.automatically_add_embedded_derp_region", true)
viper.SetDefault("unix_socket", "/var/run/headscale/headscale.sock")
viper.SetDefault("unix_socket", "/var/run/headscale.sock")
viper.SetDefault("unix_socket_permission", "0o770")
viper.SetDefault("grpc_listen_addr", ":50443")
@@ -215,17 +171,9 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("cli.timeout", "5s")
viper.SetDefault("cli.insecure", false)
viper.SetDefault("db_ssl", false)
viper.SetDefault("database.postgres.ssl", false)
viper.SetDefault("database.postgres.max_open_conns", 10)
viper.SetDefault("database.postgres.max_idle_conns", 10)
viper.SetDefault("database.postgres.conn_max_idle_time_secs", 3600)
viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"})
viper.SetDefault("oidc.strip_email_domain", true)
viper.SetDefault("oidc.only_start_if_oidc_is_available", true)
viper.SetDefault("oidc.expiry", "180d")
viper.SetDefault("oidc.use_expiry_from_token", false)
viper.SetDefault("logtail.enabled", false)
viper.SetDefault("randomize_client_port", false)
@@ -256,15 +204,15 @@ func LoadConfig(path string, isFile bool) error {
}
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
(viper.GetString("tls_letsencrypt_challenge_type") == TLSALPN01ChallengeType) &&
(viper.GetString("tls_letsencrypt_challenge_type") == tlsALPN01ChallengeType) &&
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
log.Warn().
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
}
if (viper.GetString("tls_letsencrypt_challenge_type") != HTTP01ChallengeType) &&
(viper.GetString("tls_letsencrypt_challenge_type") != TLSALPN01ChallengeType) {
if (viper.GetString("tls_letsencrypt_challenge_type") != http01ChallengeType) &&
(viper.GetString("tls_letsencrypt_challenge_type") != tlsALPN01ChallengeType) {
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
}
@@ -294,7 +242,7 @@ func LoadConfig(path string, isFile bool) error {
}
if errorText != "" {
// nolint
//nolint
return errors.New(strings.TrimSuffix(errorText, "\n"))
} else {
return nil
@@ -306,15 +254,15 @@ func GetTLSConfig() TLSConfig {
LetsEncrypt: LetsEncryptConfig{
Hostname: viper.GetString("tls_letsencrypt_hostname"),
Listen: viper.GetString("tls_letsencrypt_listen"),
CacheDir: util.AbsolutePathFromConfigPath(
CacheDir: AbsolutePathFromConfigPath(
viper.GetString("tls_letsencrypt_cache_dir"),
),
ChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
},
CertPath: util.AbsolutePathFromConfigPath(
CertPath: AbsolutePathFromConfigPath(
viper.GetString("tls_cert_path"),
),
KeyPath: util.AbsolutePathFromConfigPath(
KeyPath: AbsolutePathFromConfigPath(
viper.GetString("tls_key_path"),
),
}
@@ -326,14 +274,7 @@ func GetDERPConfig() DERPConfig {
serverRegionCode := viper.GetString("derp.server.region_code")
serverRegionName := viper.GetString("derp.server.region_name")
stunAddr := viper.GetString("derp.server.stun_listen_addr")
privateKeyPath := util.AbsolutePathFromConfigPath(
viper.GetString("derp.server.private_key_path"),
)
ipv4 := viper.GetString("derp.server.ipv4")
ipv6 := viper.GetString("derp.server.ipv6")
automaticallyAddEmbeddedDerpRegion := viper.GetBool(
"derp.server.automatically_add_embedded_derp_region",
)
if serverEnabled && stunAddr == "" {
log.Fatal().
Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true")
@@ -356,28 +297,19 @@ func GetDERPConfig() DERPConfig {
paths := viper.GetStringSlice("derp.paths")
if serverEnabled && !automaticallyAddEmbeddedDerpRegion && len(paths) == 0 {
log.Fatal().
Msg("Disabling derp.server.automatically_add_embedded_derp_region requires to configure the derp server in derp.paths")
}
autoUpdate := viper.GetBool("derp.auto_update_enabled")
updateFrequency := viper.GetDuration("derp.update_frequency")
return DERPConfig{
ServerEnabled: serverEnabled,
ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName,
ServerPrivateKeyPath: privateKeyPath,
STUNAddr: stunAddr,
URLs: urls,
Paths: paths,
AutoUpdate: autoUpdate,
UpdateFrequency: updateFrequency,
IPv4: ipv4,
IPv6: ipv6,
AutomaticallyAddEmbeddedDerpRegion: automaticallyAddEmbeddedDerpRegion,
ServerEnabled: serverEnabled,
ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName,
STUNAddr: stunAddr,
URLs: urls,
Paths: paths,
AutoUpdate: autoUpdate,
UpdateFrequency: updateFrequency,
}
}
@@ -425,45 +357,6 @@ func GetLogConfig() LogConfig {
}
}
func GetDatabaseConfig() DatabaseConfig {
debug := viper.GetBool("database.debug")
type_ := viper.GetString("database.type")
switch type_ {
case DatabaseSqlite, DatabasePostgres:
break
case "sqlite":
type_ = "sqlite3"
default:
log.Fatal().
Msgf("invalid database type %q, must be sqlite, sqlite3 or postgres", type_)
}
return DatabaseConfig{
Type: type_,
Debug: debug,
Sqlite: SqliteConfig{
Path: util.AbsolutePathFromConfigPath(
viper.GetString("database.sqlite.path"),
),
},
Postgres: PostgresConfig{
Host: viper.GetString("database.postgres.host"),
Port: viper.GetInt("database.postgres.port"),
Name: viper.GetString("database.postgres.name"),
User: viper.GetString("database.postgres.user"),
Pass: viper.GetString("database.postgres.pass"),
Ssl: viper.GetString("database.postgres.ssl"),
MaxOpenConnections: viper.GetInt("database.postgres.max_open_conns"),
MaxIdleConnections: viper.GetInt("database.postgres.max_idle_conns"),
ConnMaxIdleTimeSecs: viper.GetInt(
"database.postgres.conn_max_idle_time_secs",
),
},
}
}
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
if viper.IsSet("dns_config") {
dnsConfig := &tailcfg.DNSConfig{}
@@ -512,37 +405,39 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
if viper.IsSet("dns_config.restricted_nameservers") {
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
domains := []string{}
restrictedDNS := viper.GetStringMapStringSlice(
"dns_config.restricted_nameservers",
)
for domain, restrictedNameservers := range restrictedDNS {
restrictedResolvers := make(
[]*dnstype.Resolver,
len(restrictedNameservers),
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
restrictedDNS := viper.GetStringMapStringSlice(
"dns_config.restricted_nameservers",
)
for index, nameserverStr := range restrictedNameservers {
nameserver, err := netip.ParseAddr(nameserverStr)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
}
restrictedResolvers[index] = &dnstype.Resolver{
Addr: nameserver.String(),
for domain, restrictedNameservers := range restrictedDNS {
restrictedResolvers := make(
[]*dnstype.Resolver,
len(restrictedNameservers),
)
for index, nameserverStr := range restrictedNameservers {
nameserver, err := netip.ParseAddr(nameserverStr)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
}
restrictedResolvers[index] = &dnstype.Resolver{
Addr: nameserver.String(),
}
}
dnsConfig.Routes[domain] = restrictedResolvers
}
dnsConfig.Routes[domain] = restrictedResolvers
domains = append(domains, domain)
} else {
log.Warn().
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
}
dnsConfig.Domains = domains
}
if viper.IsSet("dns_config.domains") {
domains := viper.GetStringSlice("dns_config.domains")
if len(dnsConfig.Resolvers) > 0 {
if len(dnsConfig.Nameservers) > 0 {
dnsConfig.Domains = domains
} else if domains != nil {
log.Warn().
@@ -550,20 +445,6 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
}
if viper.IsSet("dns_config.extra_records") {
var extraRecords []tailcfg.DNSRecord
err := viper.UnmarshalKey("dns_config.extra_records", &extraRecords)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse dns_config.extra_records")
}
dnsConfig.ExtraRecords = extraRecords
}
if viper.IsSet("dns_config.magic_dns") {
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
}
@@ -575,8 +456,6 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
}
log.Trace().Interface("dns_config", dnsConfig).Msg("DNS configuration loaded")
return dnsConfig, baseDomain
}
@@ -608,29 +487,6 @@ func GetHeadscaleConfig() (*Config, error) {
if err != nil {
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
}
if prefix.Addr().Is4() {
builder := netipx.IPSetBuilder{}
builder.AddPrefix(tsaddr.CGNATRange())
ipSet, _ := builder.IPSet()
if !ipSet.ContainsPrefix(prefix) {
log.Warn().
Msgf("Prefix %s is not in the %s range. This is an unsupported configuration.",
prefixInConfig, tsaddr.CGNATRange())
}
}
if prefix.Addr().Is6() {
builder := netipx.IPSetBuilder{}
builder.AddPrefix(tsaddr.TailscaleULARange())
ipSet, _ := builder.IPSet()
if !ipSet.ContainsPrefix(prefix) {
log.Warn().
Msgf("Prefix %s is not in the %s range. This is an unsupported configuration.",
prefixInConfig, tsaddr.TailscaleULARange())
}
}
parsedPrefixes = append(parsedPrefixes, prefix)
}
@@ -655,19 +511,6 @@ func GetHeadscaleConfig() (*Config, error) {
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
}
oidcClientSecret := viper.GetString("oidc.client_secret")
oidcClientSecretPath := viper.GetString("oidc.client_secret_path")
if oidcClientSecretPath != "" && oidcClientSecret != "" {
return nil, errOidcMutuallyExclusive
}
if oidcClientSecretPath != "" {
secretBytes, err := os.ReadFile(os.ExpandEnv(oidcClientSecretPath))
if err != nil {
return nil, err
}
oidcClientSecret = strings.TrimSpace(string(secretBytes))
}
return &Config{
ServerURL: viper.GetString("server_url"),
Addr: viper.GetString("listen_addr"),
@@ -677,7 +520,10 @@ func GetHeadscaleConfig() (*Config, error) {
DisableUpdateCheck: viper.GetBool("disable_check_updates"),
IPPrefixes: prefixes,
NoisePrivateKeyPath: util.AbsolutePathFromConfigPath(
PrivateKeyPath: AbsolutePathFromConfigPath(
viper.GetString("private_key_path"),
),
NoisePrivateKeyPath: AbsolutePathFromConfigPath(
viper.GetString("noise.private_key_path"),
),
BaseDomain: baseDomain,
@@ -692,7 +538,14 @@ func GetHeadscaleConfig() (*Config, error) {
"node_update_check_interval",
),
Database: GetDatabaseConfig(),
DBtype: viper.GetString("db_type"),
DBpath: AbsolutePathFromConfigPath(viper.GetString("db_path")),
DBhost: viper.GetString("db_host"),
DBport: viper.GetInt("db_port"),
DBname: viper.GetString("db_name"),
DBuser: viper.GetString("db_user"),
DBpass: viper.GetString("db_pass"),
DBssl: viper.GetString("db_ssl"),
TLS: GetTLSConfig(),
@@ -702,7 +555,7 @@ func GetHeadscaleConfig() (*Config, error) {
ACMEURL: viper.GetString("acme_url"),
UnixSocket: viper.GetString("unix_socket"),
UnixSocketPermission: util.GetFileMode("unix_socket_permission"),
UnixSocketPermission: GetFileMode("unix_socket_permission"),
OIDC: OIDCConfig{
OnlyStartIfOIDCIsAvailable: viper.GetBool(
@@ -710,29 +563,12 @@ func GetHeadscaleConfig() (*Config, error) {
),
Issuer: viper.GetString("oidc.issuer"),
ClientID: viper.GetString("oidc.client_id"),
ClientSecret: oidcClientSecret,
ClientSecret: viper.GetString("oidc.client_secret"),
Scope: viper.GetStringSlice("oidc.scope"),
ExtraParams: viper.GetStringMapString("oidc.extra_params"),
AllowedDomains: viper.GetStringSlice("oidc.allowed_domains"),
AllowedUsers: viper.GetStringSlice("oidc.allowed_users"),
AllowedGroups: viper.GetStringSlice("oidc.allowed_groups"),
StripEmaildomain: viper.GetBool("oidc.strip_email_domain"),
Expiry: func() time.Duration {
// if set to 0, we assume no expiry
if value := viper.GetString("oidc.expiry"); value == "0" {
return maxDuration
} else {
expiry, err := model.ParseDuration(value)
if err != nil {
log.Warn().Msg("failed to parse oidc.expiry, defaulting back to 180 days")
return defaultOIDCExpiryTime
}
return time.Duration(expiry)
}
}(),
UseExpiryFromToken: viper.GetBool("oidc.use_expiry_from_token"),
},
LogTail: logConfig,

309
db.go Normal file
View File

@@ -0,0 +1,309 @@
package headscale
import (
"context"
"database/sql/driver"
"encoding/json"
"errors"
"fmt"
"net/netip"
"time"
"github.com/glebarez/sqlite"
"github.com/rs/zerolog/log"
"gorm.io/driver/postgres"
"gorm.io/gorm"
"gorm.io/gorm/logger"
"tailscale.com/tailcfg"
)
const (
dbVersion = "1"
errValueNotFound = Error("not found")
)
// KV is a key-value store in a psql table. For future use...
type KV struct {
Key string
Value string
}
func (h *Headscale) initDB() error {
db, err := h.openDB()
if err != nil {
return err
}
h.db = db
if h.dbType == Postgres {
db.Exec(`create extension if not exists "uuid-ossp";`)
}
_ = db.Migrator().RenameColumn(&Machine{}, "ip_address", "ip_addresses")
_ = db.Migrator().RenameColumn(&Machine{}, "name", "hostname")
// GivenName is used as the primary source of DNS names; make sure
// the field is populated and normalized if it was not when the
// machine was registered.
_ = db.Migrator().RenameColumn(&Machine{}, "nickname", "given_name")
// If the Machine table has a column for registered,
// find all occurrences of "false" and drop them. Then
// remove the column.
if db.Migrator().HasColumn(&Machine{}, "registered") {
log.Info().
Msg(`Database has legacy "registered" column in machine, removing...`)
machines := Machines{}
if err := h.db.Not("registered").Find(&machines).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db")
}
for _, machine := range machines {
log.Info().
Str("machine", machine.Hostname).
Str("machine_key", machine.MachineKey).
Msg("Deleting unregistered machine")
if err := h.db.Delete(&Machine{}, machine.ID).Error; err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Str("machine_key", machine.MachineKey).
Msg("Error deleting unregistered machine")
}
}
err := db.Migrator().DropColumn(&Machine{}, "registered")
if err != nil {
log.Error().Err(err).Msg("Error dropping registered column")
}
}
err = db.AutoMigrate(&Machine{})
if err != nil {
return err
}
if db.Migrator().HasColumn(&Machine{}, "given_name") {
machines := Machines{}
if err := h.db.Find(&machines).Error; err != nil {
log.Error().Err(err).Msg("Error accessing db")
}
for item, machine := range machines {
if machine.GivenName == "" {
normalizedHostname, err := NormalizeToFQDNRules(
machine.Hostname,
h.cfg.OIDC.StripEmaildomain,
)
if err != nil {
log.Error().
Caller().
Str("hostname", machine.Hostname).
Err(err).
Msg("Failed to normalize machine hostname in DB migration")
}
err = h.RenameMachine(&machines[item], normalizedHostname)
if err != nil {
log.Error().
Caller().
Str("hostname", machine.Hostname).
Err(err).
Msg("Failed to save normalized machine name in DB migration")
}
}
}
}
err = db.AutoMigrate(&KV{})
if err != nil {
return err
}
err = db.AutoMigrate(&Namespace{})
if err != nil {
return err
}
err = db.AutoMigrate(&PreAuthKey{})
if err != nil {
return err
}
err = db.AutoMigrate(&PreAuthKeyACLTag{})
if err != nil {
return err
}
_ = db.Migrator().DropTable("shared_machines")
err = db.AutoMigrate(&APIKey{})
if err != nil {
return err
}
err = h.setValue("db_version", dbVersion)
return err
}
func (h *Headscale) openDB() (*gorm.DB, error) {
var db *gorm.DB
var err error
var log logger.Interface
if h.dbDebug {
log = logger.Default
} else {
log = logger.Default.LogMode(logger.Silent)
}
switch h.dbType {
case Sqlite:
db, err = gorm.Open(
sqlite.Open(h.dbString+"?_synchronous=1&_journal_mode=WAL"),
&gorm.Config{
DisableForeignKeyConstraintWhenMigrating: true,
Logger: log,
},
)
db.Exec("PRAGMA foreign_keys=ON")
// The pure Go SQLite library does not handle locking in
// the same way as the C-based one, so we can't use the GORM
// connection pool as of 2022/02/23.
sqlDB, _ := db.DB()
sqlDB.SetMaxIdleConns(1)
sqlDB.SetMaxOpenConns(1)
sqlDB.SetConnMaxIdleTime(time.Hour)
case Postgres:
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
DisableForeignKeyConstraintWhenMigrating: true,
Logger: log,
})
}
if err != nil {
return nil, err
}
return db, nil
}
// getValue returns the value for the given key in KV.
func (h *Headscale) getValue(key string) (string, error) {
var row KV
if result := h.db.First(&row, "key = ?", key); errors.Is(
result.Error,
gorm.ErrRecordNotFound,
) {
return "", errValueNotFound
}
return row.Value, nil
}
// setValue sets the value for the given key in KV.
func (h *Headscale) setValue(key string, value string) error {
keyValue := KV{
Key: key,
Value: value,
}
if _, err := h.getValue(key); err == nil {
h.db.Model(&keyValue).Where("key = ?", key).Update("value", value)
return nil
}
if err := h.db.Create(keyValue).Error; err != nil {
return fmt.Errorf("failed to create key value pair in the database: %w", err)
}
return nil
}
func (h *Headscale) pingDB(ctx context.Context) error {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
db, err := h.db.DB()
if err != nil {
return err
}
return db.PingContext(ctx)
}
// HostInfo is a "wrapper" type around Tailscale's
// Hostinfo that lets us add database "serialization"
// methods. This allows us to use typed values throughout
// the code without marshalling/unmarshalling and error
// checking all over the place.
type HostInfo tailcfg.Hostinfo
func (hi *HostInfo) Scan(destination interface{}) error {
switch value := destination.(type) {
case []byte:
return json.Unmarshal(value, hi)
case string:
return json.Unmarshal([]byte(value), hi)
default:
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
}
}
// Value returns the JSON value, implementing the driver.Valuer interface.
func (hi HostInfo) Value() (driver.Value, error) {
bytes, err := json.Marshal(hi)
return string(bytes), err
}
type IPPrefixes []netip.Prefix
func (i *IPPrefixes) Scan(destination interface{}) error {
switch value := destination.(type) {
case []byte:
return json.Unmarshal(value, i)
case string:
return json.Unmarshal([]byte(value), i)
default:
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
}
}
// Value returns the JSON value, implementing the driver.Valuer interface.
func (i IPPrefixes) Value() (driver.Value, error) {
bytes, err := json.Marshal(i)
return string(bytes), err
}
type StringList []string
func (i *StringList) Scan(destination interface{}) error {
switch value := destination.(type) {
case []byte:
return json.Unmarshal(value, i)
case string:
return json.Unmarshal([]byte(value), i)
default:
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
}
}
// Value returns the JSON value, implementing the driver.Valuer interface.
func (i StringList) Value() (driver.Value, error) {
bytes, err := json.Marshal(i)
return string(bytes), err
}
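The wrapper types at the end of this file (HostInfo, IPPrefixes, StringList) all follow the same pattern: implement sql.Scanner and driver.Valuer so a typed Go value can be stored as JSON in a single database column. A minimal standalone sketch of the pattern with a hypothetical Tags type, not part of headscale:

package main

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// Tags is a hypothetical typed column stored as JSON, mirroring the
// StringList/IPPrefixes/HostInfo pattern above.
type Tags []string

// Scan implements sql.Scanner, decoding the JSON stored in the database.
func (t *Tags) Scan(src interface{}) error {
	switch value := src.(type) {
	case []byte:
		return json.Unmarshal(value, t)
	case string:
		return json.Unmarshal([]byte(value), t)
	default:
		return fmt.Errorf("unexpected data type %T", src)
	}
}

// Value implements driver.Valuer, encoding the value as JSON for storage.
func (t Tags) Value() (driver.Value, error) {
	bytes, err := json.Marshal(t)
	return string(bytes), err
}

func main() {
	stored, _ := Tags{"tag:web", "tag:db"}.Value()
	var loaded Tags
	_ = loaded.Scan(stored)
	fmt.Println(stored, loaded)
}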

View File

@@ -1,4 +1,4 @@
package derp
package headscale
import (
"context"
@@ -7,10 +7,10 @@ import (
"net/http"
"net/url"
"os"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v3"
"gopkg.in/yaml.v2"
"tailscale.com/tailcfg"
)
@@ -31,7 +31,7 @@ func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {
}
func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
ctx, cancel := context.WithTimeout(context.Background(), types.HTTPReadTimeout)
ctx, cancel := context.WithTimeout(context.Background(), HTTPReadTimeout)
defer cancel()
req, err := http.NewRequestWithContext(ctx, http.MethodGet, addr.String(), nil)
@@ -40,7 +40,7 @@ func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
}
client := http.Client{
Timeout: types.HTTPReadTimeout,
Timeout: HTTPReadTimeout,
}
resp, err := client.Do(req)
@@ -80,7 +80,7 @@ func mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap {
return &result
}
func GetDERPMap(cfg types.DERPConfig) *tailcfg.DERPMap {
func GetDERPMap(cfg DERPConfig) *tailcfg.DERPMap {
derpMaps := make([]*tailcfg.DERPMap, 0)
for _, path := range cfg.Paths {
@@ -132,3 +132,26 @@ func GetDERPMap(cfg types.DERPConfig) *tailcfg.DERPMap {
return derpMap
}
func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
log.Info().
Dur("frequency", h.cfg.DERP.UpdateFrequency).
Msg("Setting up a DERPMap update worker")
ticker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
for {
select {
case <-cancelChan:
return
case <-ticker.C:
log.Info().Msg("Fetching DERPMap updates")
h.DERPMap = GetDERPMap(h.cfg.DERP)
if h.cfg.DERP.ServerEnabled {
h.DERPMap.Regions[h.DERPServer.region.RegionID] = &h.DERPServer.region
}
h.setLastStateChangeToNow()
}
}
}
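The update worker above is a standard ticker-plus-cancel-channel loop. A minimal standalone sketch of the pattern; the work callback is a stand-in for fetching and swapping in a fresh DERP map:

package main

import (
	"fmt"
	"time"
)

// runPeriodically calls work on every tick until cancelChan is closed or
// receives a value, mirroring the worker loop above.
func runPeriodically(frequency time.Duration, cancelChan <-chan struct{}, work func()) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for {
		select {
		case <-cancelChan:
			return
		case <-ticker.C:
			work()
		}
	}
}

func main() {
	cancel := make(chan struct{})
	go runPeriodically(100*time.Millisecond, cancel, func() { fmt.Println("tick: refresh DERP map") })
	time.Sleep(350 * time.Millisecond)
	close(cancel)
}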

View File

@@ -1,4 +1,4 @@
package server
package headscale
import (
"context"
@@ -12,8 +12,6 @@ import (
"strings"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
"tailscale.com/derp"
"tailscale.com/net/stun"
@@ -28,30 +26,23 @@ import (
const fastStartHeader = "Derp-Fast-Start"
type DERPServer struct {
serverURL string
key key.NodePrivate
cfg *types.DERPConfig
tailscaleDERP *derp.Server
region tailcfg.DERPRegion
}
func NewDERPServer(
serverURL string,
derpKey key.NodePrivate,
cfg *types.DERPConfig,
) (*DERPServer, error) {
func (h *Headscale) NewDERPServer() (*DERPServer, error) {
log.Trace().Caller().Msg("Creating new embedded DERP server")
server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains
server := derp.NewServer(key.NodePrivate(*h.privateKey), log.Info().Msgf)
region, err := h.generateRegionLocalDERP()
if err != nil {
return nil, err
}
return &DERPServer{
serverURL: serverURL,
key: derpKey,
cfg: cfg,
tailscaleDERP: server,
}, nil
return &DERPServer{server, region}, nil
}
func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
serverURL, err := url.Parse(d.serverURL)
func (h *Headscale) generateRegionLocalDERP() (tailcfg.DERPRegion, error) {
serverURL, err := url.Parse(h.cfg.ServerURL)
if err != nil {
return tailcfg.DERPRegion{}, err
}
@@ -74,23 +65,21 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
}
localDERPregion := tailcfg.DERPRegion{
RegionID: d.cfg.ServerRegionID,
RegionCode: d.cfg.ServerRegionCode,
RegionName: d.cfg.ServerRegionName,
RegionID: h.cfg.DERP.ServerRegionID,
RegionCode: h.cfg.DERP.ServerRegionCode,
RegionName: h.cfg.DERP.ServerRegionName,
Avoid: false,
Nodes: []*tailcfg.DERPNode{
{
Name: fmt.Sprintf("%d", d.cfg.ServerRegionID),
RegionID: d.cfg.ServerRegionID,
Name: fmt.Sprintf("%d", h.cfg.DERP.ServerRegionID),
RegionID: h.cfg.DERP.ServerRegionID,
HostName: host,
DERPPort: port,
IPv4: d.cfg.IPv4,
IPv6: d.cfg.IPv6,
},
},
}
_, portSTUNStr, err := net.SplitHostPort(d.cfg.STUNAddr)
_, portSTUNStr, err := net.SplitHostPort(h.cfg.DERP.STUNAddr)
if err != nil {
return tailcfg.DERPRegion{}, err
}
@@ -101,12 +90,11 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
localDERPregion.Nodes[0].STUNPort = portSTUN
log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0])
return localDERPregion, nil
}
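Both versions of the region generator above do the same basic work: parse the server URL, derive the DERP host and port (defaulting the port from the scheme), and split the STUN listen address into host and port. A minimal standalone sketch of that parsing; the function name and the addresses in main are illustrative:

package main

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
)

// hostPortFromServerURL extracts the hostname and port from a server URL,
// defaulting the port from the scheme when none is given. Illustrative only;
// the real code also fills in region IDs, names and STUN details.
func hostPortFromServerURL(raw string) (string, int, error) {
	serverURL, err := url.Parse(raw)
	if err != nil {
		return "", 0, err
	}

	host := serverURL.Hostname()
	portStr := serverURL.Port()
	if portStr == "" {
		if serverURL.Scheme == "http" {
			portStr = "80"
		} else {
			portStr = "443"
		}
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", 0, err
	}

	return host, port, nil
}

func main() {
	host, port, _ := hostPortFromServerURL("https://headscale.example.com:8080")
	fmt.Println(host, port)

	_, stunPort, _ := net.SplitHostPort("0.0.0.0:3478")
	fmt.Println("STUN port:", stunPort)
}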
func (d *DERPServer) DERPHandler(
func (h *Headscale) DERPHandler(
writer http.ResponseWriter,
req *http.Request,
) {
@@ -168,23 +156,23 @@ func (d *DERPServer) DERPHandler(
log.Trace().Caller().Msgf("Hijacked connection from %v", req.RemoteAddr)
if !fastStart {
pubKey := d.key.Public()
pubKeyStr, _ := pubKey.MarshalText() //nolint
pubKey := h.privateKey.Public()
pubKeyStr := pubKey.UntypedHexString() //nolint
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
"Upgrade: DERP\r\n"+
"Connection: Upgrade\r\n"+
"Derp-Version: %v\r\n"+
"Derp-Public-Key: %s\r\n\r\n",
derp.ProtocolVersion,
string(pubKeyStr))
pubKeyStr)
}
d.tailscaleDERP.Accept(req.Context(), netConn, conn, netConn.RemoteAddr().String())
h.DERPServer.tailscaleDERP.Accept(req.Context(), netConn, conn, netConn.RemoteAddr().String())
}
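When the client did not request fast start, the handler above writes a plain-text 101 Switching Protocols response on the hijacked connection before handing it to the embedded DERP server. A minimal sketch of just that step, writing into an in-memory buffer instead of a hijacked net.Conn; the version number and key string in main are placeholders for derp.ProtocolVersion and the server's public key:

package main

import (
	"bytes"
	"fmt"
	"io"
)

// writeDERPUpgrade writes the non-fast-start handshake the handler above
// sends on the hijacked connection.
func writeDERPUpgrade(w io.Writer, protocolVersion int, pubKeyStr string) {
	fmt.Fprintf(w, "HTTP/1.1 101 Switching Protocols\r\n"+
		"Upgrade: DERP\r\n"+
		"Connection: Upgrade\r\n"+
		"Derp-Version: %v\r\n"+
		"Derp-Public-Key: %s\r\n\r\n",
		protocolVersion,
		pubKeyStr)
}

func main() {
	var buf bytes.Buffer
	writeDERPUpgrade(&buf, 2, "nodekey:0123abcd")
	fmt.Print(buf.String())
}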
// DERPProbeHandler is the endpoint that js/wasm clients hit to measure
// DERP latency, since they can't do UDP STUN queries.
func DERPProbeHandler(
func (h *Headscale) DERPProbeHandler(
writer http.ResponseWriter,
req *http.Request,
) {
@@ -211,48 +199,43 @@ func DERPProbeHandler(
// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
// They have a cache, but it is not clear whether that is really necessary at Headscale, uh, scale.
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
// The coordination server is included automatically, since the local DERP uses the same DNS name as d.serverURL.
func DERPBootstrapDNSHandler(
derpMap *tailcfg.DERPMap,
) func(http.ResponseWriter, *http.Request) {
return func(
writer http.ResponseWriter,
req *http.Request,
) {
dnsEntries := make(map[string][]net.IP)
func (h *Headscale) DERPBootstrapDNSHandler(
writer http.ResponseWriter,
req *http.Request,
) {
dnsEntries := make(map[string][]net.IP)
resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
defer cancel()
var resolver net.Resolver
for _, region := range derpMap.Regions {
for _, node := range region.Nodes { // we don't care if we override some nodes
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName)
if err != nil {
log.Trace().
Caller().
Err(err).
Msgf("bootstrap DNS lookup failed %q", node.HostName)
resolvCtx, cancel := context.WithTimeout(req.Context(), time.Minute)
defer cancel()
var resolver net.Resolver
for _, region := range h.DERPMap.Regions {
for _, node := range region.Nodes { // we don't care if we override some nodes
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName)
if err != nil {
log.Trace().
Caller().
Err(err).
Msgf("bootstrap DNS lookup failed %q", node.HostName)
continue
}
dnsEntries[node.HostName] = addrs
continue
}
dnsEntries[node.HostName] = addrs
}
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
err := json.NewEncoder(writer).Encode(dnsEntries)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
}
writer.Header().Set("Content-Type", "application/json")
writer.WriteHeader(http.StatusOK)
err := json.NewEncoder(writer).Encode(dnsEntries)
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
}
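The bootstrap DNS handler above boils down to three steps: resolve each DERP node hostname with a bounded context, collect the addresses into a map keyed by hostname, and encode the map as JSON. A minimal standalone sketch of that flow; the hostname in main is only an example:

package main

import (
	"context"
	"encoding/json"
	"net"
	"os"
	"time"
)

// resolveDERPHosts looks up each hostname with a one-minute deadline and
// skips hosts that fail to resolve, mirroring the handler above.
func resolveDERPHosts(ctx context.Context, hostnames []string) map[string][]net.IP {
	resolvCtx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	var resolver net.Resolver
	dnsEntries := make(map[string][]net.IP)
	for _, hostname := range hostnames {
		addrs, err := resolver.LookupIP(resolvCtx, "ip", hostname)
		if err != nil {
			continue // we don't care if some hosts fail to resolve
		}
		dnsEntries[hostname] = addrs
	}

	return dnsEntries
}

func main() {
	// In the handler the hostnames come from the DERP map regions.
	entries := resolveDERPHosts(context.Background(), []string{"derp.tailscale.com"})
	_ = json.NewEncoder(os.Stdout).Encode(entries)
}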
// ServeSTUN starts a STUN server on the configured addr.
func (d *DERPServer) ServeSTUN() {
packetConn, err := net.ListenPacket("udp", d.cfg.STUNAddr)
func (h *Headscale) ServeSTUN() {
packetConn, err := net.ListenPacket("udp", h.cfg.DERP.STUNAddr)
if err != nil {
log.Fatal().Msgf("failed to open STUN listener: %v", err)
}

Some files were not shown because too many files have changed in this diff Show More