Mirror of https://github.com/juanfont/headscale.git (synced 2025-08-20 09:57:37 +00:00)

Compare commits: v0.22.0-nf...rename-mac (60 commits)
Commits (SHA1):

9f7c25e853
851da9d674
83b4389090
89fffeab31
46221cc220
cf22604a4b
ae03f440ee
47bc930ace
a2b760834f
493bcfcf18
df72508089
0f8d8fc2d8
744e5a11b6
3ea1750ea0
a45777d22e
56dd734300
d0113732fe
6215eb6471
1d2b4bca8a
96f9680afd
b465592c07
991ff25362
eacd687dbf
549f5a164d
bb07aec82c
a5afe4bd06
a71cc81fe7
679305c3e4
c0680f34f1
64ebe6b0c8
e6b26499f7
977eb1dee3
b2e2b02210
2abff4bb08
54c00645d1
cad5ce0ebd
b12a167fa2
667295e15e
bea52678e3
307cfc3304
5e74ca9414
9836b097a4
d0b3b1bfc4
6eea96eabc
d08fee78c3
bb5f0d456c
c186c49e25
4ec6894773
dd9b4b1cb7
a43bb9c958
ba905ff6fc
99bd09f688
a6bc792a61
6381d3660a
66c5f74d78
1723a6bf40
353f191e4f
8d865bb61b
c6815c5334
b684ac0668
.github/workflows/docs.yml (vendored, new file, 45 lines)

@@ -0,0 +1,45 @@
name: Build documentation

on:
  push:
    branches:
      - main
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
      - name: Install python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x
      - name: Setup cache
        uses: actions/cache@v2
        with:
          key: ${{ github.ref }}
          path: .cache
      - name: Setup dependencies
        run: pip install mkdocs-material pillow cairosvg mkdocs-minify-plugin
      - name: Build docs
        run: mkdocs build --strict
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v1
        with:
          path: ./site
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v1
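The same documentation build can be checked locally before pushing; a minimal sketch, assuming the plugin set this workflow installs:

    pip install mkdocs-material pillow cairosvg mkdocs-minify-plugin
    mkdocs build --strict   # same strict build the CI step runs
    mkdocs serve            # optional local preview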
.github/workflows/release-docker.yml (vendored, new file, 138 lines)

@@ -0,0 +1,138 @@
---
name: Release Docker

on:
  push:
    tags:
      - "*" # triggers only if push new tag version
  workflow_dispatch:

jobs:
  docker-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
            type=raw,value=develop
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new
          build-args: |
            VERSION=${{ steps.meta.outputs.version }}
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

  docker-debug-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache-debug
          key: ${{ runner.os }}-buildx-debug-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-debug-
      - name: Docker meta
        id: meta-debug
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          flavor: |
            suffix=-debug,onlatest=true
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
            type=raw,value=develop
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          file: Dockerfile.debug
          tags: ${{ steps.meta-debug.outputs.tags }}
          labels: ${{ steps.meta-debug.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache-debug
          cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
          build-args: |
            VERSION=${{ steps.meta-debug.outputs.version }}
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache-debug
          mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug
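As a usage sketch of what these two jobs publish (the image owner is assumed to be the upstream juanfont organisation and the tag value is illustrative), the semver and debug-flavoured images can be pulled with:

    docker pull ghcr.io/juanfont/headscale:0.22.0        # plain release image
    docker pull ghcr.io/juanfont/headscale:0.22.0-debug  # debug flavour, built from Dockerfile.debug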
.github/workflows/release.yml (vendored, 131 changed lines)

@@ -19,135 +19,6 @@ jobs:
      - uses: cachix/install-nix-action@v16

      - name: Run goreleaser
        run: nix develop --command -- goreleaser release --rm-dist
        run: nix develop --command -- goreleaser release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  docker-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
            type=raw,value=develop
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new
          build-args: |
            VERSION=${{ steps.meta.outputs.version }}
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

  docker-debug-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache-debug
          key: ${{ runner.os }}-buildx-debug-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-debug-
      - name: Docker meta
        id: meta-debug
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          flavor: |
            suffix=-debug,onlatest=true
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=sha
            type=raw,value=develop
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          file: Dockerfile.debug
          tags: ${{ steps.meta-debug.outputs.tags }}
          labels: ${{ steps.meta-debug.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache-debug
          cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
          build-args: |
            VERSION=${{ steps.meta-debug.outputs.version }}
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache-debug
          mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug
.github/workflows/test-integration-derp.yml (vendored, deleted file, 35 lines)

@@ -1,35 +0,0 @@
name: Integration Test DERP

on: [pull_request]

jobs:
  integration-test-derp:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 10

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v16
        if: steps.changed-files.outputs.any_changed == 'true'

      - name: Run Embedded DERP server integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: nix develop --command -- make test_integration_derp
@@ -55,3 +55,9 @@ jobs:
        with:
          name: logs
          path: "control_logs/*.log"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: pprof
          path: "control_logs/*.pprof.tar"
.github/workflows/test-integration-v2-TestACLDevice1CanAccessDevice2.yaml (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestACLDevice1CanAccessDevice2

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go test ./... \
              -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
              -run "^TestACLDevice1CanAccessDevice2$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: pprof
          path: "control_logs/*.pprof.tar"
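These per-test workflows are generated rather than hand-written; as their header comment notes, they can be regenerated with a command along these lines:

    cd cmd/gh-action-integration-generator
    go generate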
.github/workflows/test-integration-v2-TestACLNamedHostsCanReach.yaml (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestACLNamedHostsCanReach

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go test ./... \
              -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
              -run "^TestACLNamedHostsCanReach$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: pprof
          path: "control_logs/*.pprof.tar"
.github/workflows/test-integration-v2-TestACLNamedHostsCanReachBySubnet.yaml (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestACLNamedHostsCanReachBySubnet

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go test ./... \
              -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
              -run "^TestACLNamedHostsCanReachBySubnet$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: pprof
          path: "control_logs/*.pprof.tar"
.github/workflows/test-integration-v2-TestDERPServerScenario.yaml (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestDERPServerScenario

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go test ./... \
              -tags ts2019 \
              -failfast \
              -timeout 120m \
              -parallel 1 \
              -run "^TestDERPServerScenario$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: pprof
          path: "control_logs/*.pprof.tar"
.gitignore (vendored, 9 changed lines)
@@ -1,3 +1,5 @@
ignored/

# Binaries for programs and plugins
*.exe
*.exe~

@@ -12,8 +14,9 @@
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
vendor/

dist/
/headscale
config.json
config.yaml

@@ -34,3 +37,7 @@ result
.direnv/

integration_test/etc/config.dump.yaml

# MkDocs
.cache
/site
.goreleaser.yml (115 changed lines)
@@ -1,21 +1,28 @@
---
before:
  hooks:
    - go mod tidy -compat=1.19
    - go mod tidy -compat=1.20
    - go mod vendor

release:
  prerelease: auto

builds:
  - id: darwin-amd64
  - id: headscale
    main: ./cmd/headscale/headscale.go
    mod_timestamp: "{{ .CommitTimestamp }}"
    env:
      - CGO_ENABLED=0
    goos:
      - darwin
    goarch:
      - amd64
    targets:
      - darwin_amd64
      - darwin_arm64
      - freebsd_amd64
      - linux_386
      - linux_amd64
      - linux_arm64
      - linux_arm_5
      - linux_arm_6
      - linux_arm_7
    flags:
      - -mod=readonly
    ldflags:

@@ -23,60 +30,56 @@ builds:
    tags:
      - ts2019

  - id: darwin-arm64
    main: ./cmd/headscale/headscale.go
    mod_timestamp: "{{ .CommitTimestamp }}"
    env:
      - CGO_ENABLED=0
    goos:
      - darwin
    goarch:
      - arm64
    flags:
      - -mod=readonly
    ldflags:
      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
    tags:
      - ts2019

  - id: linux-amd64
    mod_timestamp: "{{ .CommitTimestamp }}"
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - amd64
    main: ./cmd/headscale/headscale.go
    ldflags:
      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
    tags:
      - ts2019

  - id: linux-arm64
    mod_timestamp: "{{ .CommitTimestamp }}"
    env:
      - CGO_ENABLED=0
    goos:
      - linux
    goarch:
      - arm64
    main: ./cmd/headscale/headscale.go
    ldflags:
      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
    tags:
      - ts2019

archives:
  - id: golang-cross
    builds:
      - darwin-amd64
      - darwin-arm64
      - linux-amd64
      - linux-arm64
    name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
    name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
    format: binary

source:
  enabled: true
  name_template: "{{ .ProjectName }}_{{ .Version }}"
  format: tar.gz
  files:
    - "vendor/"

nfpms:
  # Configure nFPM for .deb and .rpm releases
  #
  # See https://nfpm.goreleaser.com/configuration/
  # and https://goreleaser.com/customization/nfpm/
  #
  # Useful tools for debugging .debs:
  # List file contents: dpkg -c dist/headscale...deb
  # Package metadata: dpkg --info dist/headscale....deb
  #
  - builds:
      - headscale
    package_name: headscale
    priority: optional
    vendor: headscale
    maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
    homepage: https://github.com/juanfont/headscale
    license: BSD
    bindir: /usr/bin
    formats:
      - deb
      # - rpm
    contents:
      - src: ./config-example.yaml
        dst: /etc/headscale/config.yaml
        type: config|noreplace
        file_info:
          mode: 0644
      - src: ./docs/packaging/headscale.systemd.service
        dst: /usr/lib/systemd/system/headscale.service
      - dst: /var/lib/headscale
        type: dir
      - dst: /var/run/headscale
        type: dir
    scripts:
      postinstall: ./docs/packaging/postinstall.sh
      postremove: ./docs/packaging/postremove.sh

checksum:
  name_template: "checksums.txt"
snapshot:
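Worked through the new archive name_template (assuming the project name is headscale; the version shown is illustrative), the per-target binaries come out with names such as:

    headscale_0.23.0_linux_amd64
    headscale_0.23.0_linux_armv7
    headscale_0.23.0_darwin_arm64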
CHANGELOG.md (21 changed lines)
@@ -1,10 +1,29 @@
# CHANGELOG

## 0.22.0 (2023-XX-XX)
## 0.23.0 (2023-XX-XX)

### Changes

- Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382)
  - Profiles are continuously generated in our integration tests.
- Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391)

## 0.22.1 (2023-04-20)

### Changes

- Fix issue where SystemD could not bind to port 80 [#1365](https://github.com/juanfont/headscale/pull/1365)

## 0.22.0 (2023-04-20)

### Changes

- Add `.deb` packages to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Update and simplify the documentation to use new `.deb` packages [#1349](https://github.com/juanfont/headscale/pull/1349)
- Add 32-bit Arm platforms to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Fix longstanding bug that would prevent "\*" from working properly in ACLs (issue [#699](https://github.com/juanfont/headscale/issues/699)) [#1279](https://github.com/juanfont/headscale/pull/1279)
- Fix issue where IPv6 could not be used in, or while using ACLs (part of [#809](https://github.com/juanfont/headscale/issues/809)) [#1339](https://github.com/juanfont/headscale/pull/1339)
- Target Go 1.20 and Tailscale 1.38 for Headscale [#1323](https://github.com/juanfont/headscale/pull/1323)

## 0.21.0 (2023-03-20)
@@ -1,5 +1,5 @@
# Builder image
FROM docker.io/golang:1.19-bullseye AS build
FROM docker.io/golang:1.20-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -1,5 +1,5 @@
# Builder image
FROM docker.io/golang:1.19-bullseye AS build
FROM docker.io/golang:1.20-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

@@ -13,7 +13,7 @@ RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.c
RUN test -e /go/bin/headscale

# Debug image
FROM docker.io/golang:1.19.0-bullseye
FROM docker.io/golang:1.20.0-bullseye

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC
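Both Dockerfiles take the same VERSION build argument that the release workflow passes in; a local build sketch (the image tags here are illustrative):

    docker build --build-arg VERSION=dev -t headscale:local .
    docker build --build-arg VERSION=dev -f Dockerfile.debug -t headscale:local-debug .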
Makefile (29 changed lines)
@@ -36,17 +36,7 @@ test_integration_cli:
    -v ~/.cache/hs-integration-go:/go \
    -v $$PWD:$$PWD -w $$PWD \
    -v /var/run/docker.sock:/var/run/docker.sock golang:1 \
    go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...

test_integration_derp:
    docker network rm $$(docker network ls --filter name=headscale --quiet) || true
    docker network create headscale-test || true
    docker run -t --rm \
        --network headscale-test \
        -v ~/.cache/hs-integration-go:/go \
        -v $$PWD:$$PWD -w $$PWD \
        -v /var/run/docker.sock:/var/run/docker.sock golang:1 \
        go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...
    go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...

test_integration_v2_general:
    docker run \

@@ -56,13 +46,7 @@ test_integration_v2_general:
    -v $$PWD:$$PWD -w $$PWD/integration \
    -v /var/run/docker.sock:/var/run/docker.sock \
    golang:1 \
    go test $(TAGS) -failfast ./... -timeout 120m -parallel 8

coverprofile_func:
    go tool cover -func=coverage.out

coverprofile_html:
    go tool cover -html=coverage.out
    go run gotest.tools/gotestsum@latest -- $(TAGS) -failfast ./... -timeout 120m -parallel 8

lint:
    golangci-lint run --fix --timeout 10m

@@ -80,11 +64,4 @@ compress: build

generate:
    rm -rf gen
    go run github.com/bufbuild/buf/cmd/buf generate proto

install-protobuf-plugins:
    go install \
        github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway \
        github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 \
        google.golang.org/protobuf/cmd/protoc-gen-go \
        google.golang.org/grpc/cmd/protoc-gen-go-grpc
    buf generate proto
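The rewritten targets run the tests through gotestsum rather than plain go test; a rough local equivalent of the CLI-test invocation, outside Docker and assuming $(TAGS) expands to -tags ts2019:

    go run gotest.tools/gotestsum@latest -- -tags ts2019 -failfast -timeout 30m -count=1 -run IntegrationCLI ./...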
README.md (182 changed lines)
@@ -38,7 +38,6 @@ implements a _single_ Tailnet, which is typically what a single organisation, or
home/personal setup would use.

`headscale` uses terms that map to Tailscale's control server; consult the
[glossary](./docs/glossary.md) for explanations.

## Support

@@ -79,7 +78,7 @@ one of the maintainers.

## Running headscale

Please have a look at the documentation under [`docs/`](docs/).
Please have a look at the [`documentation`](https://headscale.net/).

## Graphical Control Panels
The remaining hunks fall in the contributors gallery of README.md, an HTML table in which each cell holds a contributor's avatar image, GitHub link and name. Only the contributors whose cells appear in each hunk are listed here:

@@ -188,13 +187,6 @@ make build — Juan Font, Adrien Raffin-Caboisse (restanrm), Ward Vandewege (cure)
@@ -216,8 +208,6 @@ make build — Benjamin Roberts, Nico (reynico)
@@ -225,6 +215,8 @@ make build — Nico, Even Holthe (evenh)
@@ -260,6 +252,13 @@ make build — unreality, Moritz Poldrack (mpldr)
@@ -270,10 +269,10 @@ make build — Moritz Poldrack (mpldr), Adrien Raffin-Caboisse (restanrm)
@@ -350,6 +349,13 @@ make build — Stefan Majer (majst01), Fernando De Lucchi (fdelucchijr)
@@ -364,13 +370,6 @@ make build — Orville Q. Song, Stefan Majer (majst01), hdhoang
@@ -385,6 +384,15 @@ make build — bravechamp (two cells), Deon Thomas (deonthomasgy)
@@ -392,8 +400,6 @@ make build — Deon Thomas, Jamie Greeff (madjam002)
@@ -429,6 +435,8 @@ make build — Paul Tötterman, Samuel Lock (samson4649)
@@ -436,8 +444,6 @@ make build — Samuel Lock, kevinlin (kevin1sMe)
@@ -473,6 +479,8 @@ make build — dbevacqua, Josh Taylor (joshuataylor)
@@ -480,8 +488,6 @@ make build — Josh Taylor, LiuHanCheng (CNLHC)
@@ -517,6 +523,8 @@ make build — Steven Honson, Victor Freire (ratsclub)
@@ -524,15 +532,6 @@ make build — Victor Freire, lachy2849, thomas (t56k)
@@ -540,6 +539,13 @@ make build — thomas, Sean Reifschneider (linsomniac), Abraham Ingersoll (aberoham)
@@ -561,6 +567,8 @@ make build — Andrei Pechkurov, Anoop Sundaresh (theryecatcher)
@@ -568,8 +576,6 @@ make build — Anoop Sundaresh, Antoine POPINEAU (apognu)
@@ -577,6 +583,13 @@ make build — Antoine POPINEAU, Antonio Fernandez (tony1661), Aofei Sheng (aofei)
@@ -591,13 +604,6 @@ make build — Arnar, Arthur Woimbée (awoimbee), Avirut Mehta (avirut)
@@ -605,6 +611,8 @@ make build — Avirut Mehta, Bryan Stenson (stensonb)
@@ -612,8 +620,6 @@ make build — Bryan Stenson, Carson Yang (yangchuansheng)
@@ -649,6 +655,15 @@ make build — Felix Yan, Gabe Cook (gabe565), JJGadgets
@@ -656,8 +671,6 @@ make build — JJGadgets, hrtkpf
@@ -686,6 +699,8 @@ make build — John Axel Eriksson, Jonathan de Jong (ShadowJonathan)
@@ -693,6 +708,20 @@ make build — Jonathan de Jong, Julien Zweverink (JulienFloris), Kurnia D Win (win-t), Marc (foxtrot)
@@ -700,8 +729,6 @@ make build — Marc, Maxim Gajdaj (magf)
@@ -716,6 +743,8 @@ make build — Michael Savage, Pierre Carru (piec)
@@ -744,8 +773,6 @@ make build — rcursaru, Mend Renovate (renovate-bot)
@@ -760,13 +787,8 @@ make build — Ryan Fowler, Sean Reifschneider (linsomniac), Shaanan Cohney (shaananc)
@@ -788,8 +810,6 @@ make build — sophware, Tanner (m-tanner-dev0)
@@ -804,6 +824,15 @@ make build — Teteros (two cells), The Gitter Badger (gitter-badger)
|
||||
@@ -832,8 +861,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Tjerk Woudsma</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/y0ngb1n>
|
||||
<img src=https://avatars.githubusercontent.com/u/25719408?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yang Bin/>
|
||||
@@ -848,6 +875,8 @@ make build
|
||||
<sub style="font-size:14px"><b>Yujie Xia</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/newellz2>
|
||||
<img src=https://avatars.githubusercontent.com/u/52436542?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zachary Newell/>
|
||||
@@ -876,8 +905,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Ziyuan Han</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/caelansar>
|
||||
<img src=https://avatars.githubusercontent.com/u/31852257?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=caelansar/>
|
||||
@@ -892,6 +919,8 @@ make build
|
||||
<sub style="font-size:14px"><b>derelm</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/dnaq>
|
||||
<img src=https://avatars.githubusercontent.com/u/1299717?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=dnaq/>
|
||||
@@ -920,8 +949,6 @@ make build
|
||||
<sub style="font-size:14px"><b>jimyag</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/magichuihui>
|
||||
<img src=https://avatars.githubusercontent.com/u/10866198?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=suhelen/>
|
||||
@@ -936,6 +963,8 @@ make build
|
||||
<sub style="font-size:14px"><b>sharkonet</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ma6174>
|
||||
<img src=https://avatars.githubusercontent.com/u/1449133?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ma6174/>
|
||||
@@ -951,10 +980,17 @@ make build
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/pernila>
|
||||
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=pernila/>
|
||||
<a href=https://github.com/nicholas-yap>
|
||||
<img src=https://avatars.githubusercontent.com/u/38109533?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=nicholas-yap/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>pernila</b></sub>
|
||||
<sub style="font-size:14px"><b>nicholas-yap</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/pernila>
|
||||
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tommi Pernila/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Tommi Pernila</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -964,8 +1000,6 @@ make build
|
||||
<sub style="font-size:14px"><b>phpmalik</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Wakeful-Cloud>
|
||||
<img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful-Cloud/>
|
||||
@@ -973,6 +1007,8 @@ make build
|
||||
<sub style="font-size:14px"><b>Wakeful-Cloud</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/xpzouying>
|
||||
<img src=https://avatars.githubusercontent.com/u/3946563?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=zy/>
|
||||
|
159 acls.go
@@ -13,6 +13,7 @@ import (
	"time"

	"github.com/rs/zerolog/log"
	"github.com/samber/lo"
	"github.com/tailscale/hujson"
	"go4.org/netipx"
	"gopkg.in/yaml.v3"

@@ -118,7 +119,7 @@ func (h *Headscale) LoadACLPolicy(path string) error {
}

func (h *Headscale) UpdateACLRules() error {
	machines, err := h.ListMachines()
	nodes, err := h.ListNodes()
	if err != nil {
		return err
	}

@@ -127,7 +128,7 @@ func (h *Headscale) UpdateACLRules() error {
		return errEmptyPolicy
	}

	rules, err := generateACLRules(machines, *h.aclPolicy, h.cfg.OIDC.StripEmaildomain)
	rules, err := generateACLRules(nodes, *h.aclPolicy, h.cfg.OIDC.StripEmaildomain)
	if err != nil {
		return err
	}

@@ -162,23 +163,20 @@ func (h *Headscale) UpdateACLRules() error {
// generateACLPeerCacheMap takes a list of Tailscale filter rules and generates a map
// of which Sources ("*" and IPs) can access destinations. This is to speed up the
// process of generating MapResponses when deciding which Peers to inform nodes about.
func generateACLPeerCacheMap(rules []tailcfg.FilterRule) map[string]map[string]struct{} {
	aclCachePeerMap := make(map[string]map[string]struct{})
func generateACLPeerCacheMap(rules []tailcfg.FilterRule) map[string][]string {
	aclCachePeerMap := make(map[string][]string)
	for _, rule := range rules {
		for _, srcIP := range rule.SrcIPs {
			for _, ip := range expandACLPeerAddr(srcIP) {
				if data, ok := aclCachePeerMap[ip]; ok {
					for _, dstPort := range rule.DstPorts {
						for _, dstIP := range expandACLPeerAddr(dstPort.IP) {
							data[dstIP] = struct{}{}
						}
						data = append(data, dstPort.IP)
					}
					aclCachePeerMap[ip] = data
				} else {
					dstPortsMap := make(map[string]struct{}, len(rule.DstPorts))
					dstPortsMap := make([]string, 0)
					for _, dstPort := range rule.DstPorts {
						for _, dstIP := range expandACLPeerAddr(dstPort.IP) {
							dstPortsMap[dstIP] = struct{}{}
						}
						dstPortsMap = append(dstPortsMap, dstPort.IP)
					}
					aclCachePeerMap[ip] = dstPortsMap
				}
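The hunk above reshapes the ACL peer cache from a set-valued map (map[string]map[string]struct{}) to a slice-valued map (map[string][]string), and it now stores the raw destination strings instead of expanding them up front. A minimal stand-alone sketch of that shape, using the public tailcfg types; it deliberately skips headscale's expandACLPeerAddr step, so plain SrcIPs strings are used as keys (an assumption made here for illustration only, not the exact headscale code):

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

// buildPeerCache records, for every source address in the filter rules,
// the raw destination addresses it is allowed to reach.
func buildPeerCache(rules []tailcfg.FilterRule) map[string][]string {
	cache := make(map[string][]string)
	for _, rule := range rules {
		for _, src := range rule.SrcIPs {
			dsts := cache[src]
			for _, dstPort := range rule.DstPorts {
				dsts = append(dsts, dstPort.IP)
			}
			cache[src] = dsts
		}
	}

	return cache
}

func main() {
	rules := []tailcfg.FilterRule{
		{
			SrcIPs:   []string{"100.64.0.1"},
			DstPorts: []tailcfg.NetPortRange{{IP: "100.64.0.2", Ports: tailcfg.PortRangeAny}},
		},
	}
	fmt.Println(buildPeerCache(rules)) // map[100.64.0.1:[100.64.0.2]]
}

A slice keeps the destinations in rule order and defers any expansion or de-duplication to the point where the map response is built; the trade-off versus the old set is that duplicate destinations are now possible in the cached value.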
@@ -227,7 +225,7 @@ func expandACLPeerAddr(srcIP string) []string {
}

func generateACLRules(
	machines []Machine,
	nodes []Node,
	aclPolicy ACLPolicy,
	stripEmaildomain bool,
) ([]tailcfg.FilterRule, error) {

@@ -240,7 +238,7 @@ func generateACLRules(

		srcIPs := []string{}
		for innerIndex, src := range acl.Sources {
			srcs, err := generateACLPolicySrc(machines, aclPolicy, src, stripEmaildomain)
			srcs, err := generateACLPolicySrc(nodes, aclPolicy, src, stripEmaildomain)
			if err != nil {
				log.Error().
					Msgf("Error parsing ACL %d, Source %d", index, innerIndex)

@@ -261,7 +259,7 @@ func generateACLRules(
		destPorts := []tailcfg.NetPortRange{}
		for innerIndex, dest := range acl.Destinations {
			dests, err := generateACLPolicyDest(
				machines,
				nodes,
				aclPolicy,
				dest,
				needsWildcard,

@@ -293,7 +291,7 @@ func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
		return nil, errEmptyPolicy
	}

	machines, err := h.ListMachines()
	nodes, err := h.ListNodes()
	if err != nil {
		return nil, err
	}

@@ -341,7 +339,7 @@ func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
		principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
		for innerIndex, rawSrc := range sshACL.Sources {
			expandedSrcs, err := expandAlias(
				machines,
				nodes,
				*h.aclPolicy,
				rawSrc,
				h.cfg.OIDC.StripEmaildomain,

@@ -392,30 +390,55 @@ func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
}

func generateACLPolicySrc(
	machines []Machine,
	nodes []Node,
	aclPolicy ACLPolicy,
	src string,
	stripEmaildomain bool,
) ([]string, error) {
	return expandAlias(machines, aclPolicy, src, stripEmaildomain)
	return expandAlias(nodes, aclPolicy, src, stripEmaildomain)
}

func generateACLPolicyDest(
	machines []Machine,
	nodes []Node,
	aclPolicy ACLPolicy,
	dest string,
	needsWildcard bool,
	stripEmaildomain bool,
) ([]tailcfg.NetPortRange, error) {
	tokens := strings.Split(dest, ":")
	var tokens []string

	log.Trace().Str("destination", dest).Msg("generating policy destination")

	// Check if there is a IPv4/6:Port combination, IPv6 has more than
	// three ":".
	tokens = strings.Split(dest, ":")
	if len(tokens) < expectedTokenItems || len(tokens) > 3 {
		return nil, errInvalidPortFormat
		port := tokens[len(tokens)-1]

		maybeIPv6Str := strings.TrimSuffix(dest, ":"+port)
		log.Trace().Str("maybeIPv6Str", maybeIPv6Str).Msg("")

		if maybeIPv6, err := netip.ParseAddr(maybeIPv6Str); err != nil && !maybeIPv6.Is6() {
			log.Trace().Err(err).Msg("trying to parse as IPv6")

			return nil, fmt.Errorf(
				"failed to parse destination, tokens %v: %w",
				tokens,
				errInvalidPortFormat,
			)
		} else {
			tokens = []string{maybeIPv6Str, port}
		}
	}

	log.Trace().Strs("tokens", tokens).Msg("generating policy destination")

	var alias string
	// We can have here stuff like:
	// git-server:*
	// 192.168.1.0/24:22
	// fd7a:115c:a1e0::2:22
	// fd7a:115c:a1e0::2/128:22
	// tag:montreal-webserver:80,443
	// tag:api-server:443
	// example-host-1:*
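The rewritten destination parsing above has to cope with IPv6 literals, where a plain split on ":" yields more than two tokens. A rough, self-contained sketch of that idea; it is simplified and does not reproduce headscale's expectedTokenItems handling or its error values:

package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// splitDestination splits an ACL destination such as "192.168.1.0/24:22" or
// "fd7a:115c:a1e0::2:22" into a host part and a port part. When the naive
// ":"-split produces more than two tokens, everything before the last ":"
// is tried as an IPv6 address.
func splitDestination(dest string) (host, port string, err error) {
	tokens := strings.Split(dest, ":")
	if len(tokens) == 2 {
		return tokens[0], tokens[1], nil
	}

	port = tokens[len(tokens)-1]
	maybeIPv6 := strings.TrimSuffix(dest, ":"+port)
	if addr, parseErr := netip.ParseAddr(maybeIPv6); parseErr == nil && addr.Is6() {
		return maybeIPv6, port, nil
	}

	return "", "", fmt.Errorf("invalid destination %q", dest)
}

func main() {
	fmt.Println(splitDestination("git-server:*"))            // git-server * <nil>
	fmt.Println(splitDestination("fd7a:115c:a1e0::2:22"))    // fd7a:115c:a1e0::2 22 <nil>
}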
@@ -426,7 +449,7 @@ func generateACLPolicyDest(
|
||||
}
|
||||
|
||||
expanded, err := expandAlias(
|
||||
machines,
|
||||
nodes,
|
||||
aclPolicy,
|
||||
alias,
|
||||
stripEmaildomain,
|
||||
@@ -508,9 +531,11 @@ func parseProtocol(protocol string) ([]int, bool, error) {
|
||||
// - a group
|
||||
// - a tag
|
||||
// - a host
|
||||
// - an ip
|
||||
// - a cidr
|
||||
// and transform these in IPAddresses.
|
||||
func expandAlias(
|
||||
machines []Machine,
|
||||
nodes Nodes,
|
||||
aclPolicy ACLPolicy,
|
||||
alias string,
|
||||
stripEmailDomain bool,
|
||||
@@ -530,7 +555,7 @@ func expandAlias(
|
||||
return ips, err
|
||||
}
|
||||
for _, n := range users {
|
||||
nodes := filterMachinesByUser(machines, n)
|
||||
nodes := filterNodesByUser(nodes, n)
|
||||
for _, node := range nodes {
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
@@ -541,9 +566,9 @@ func expandAlias(
|
||||
|
||||
if strings.HasPrefix(alias, "tag:") {
|
||||
// check for forced tags
|
||||
for _, machine := range machines {
|
||||
if contains(machine.ForcedTags, alias) {
|
||||
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
|
||||
for _, node := range nodes {
|
||||
if contains(node.ForcedTags, alias) {
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -565,13 +590,13 @@ func expandAlias(
|
||||
}
|
||||
}
|
||||
|
||||
// filter out machines per tag owner
|
||||
// filter out nodes per tag owner
|
||||
for _, user := range owners {
|
||||
machines := filterMachinesByUser(machines, user)
|
||||
for _, machine := range machines {
|
||||
hi := machine.GetHostInfo()
|
||||
nodes := filterNodesByUser(nodes, user)
|
||||
for _, node := range nodes {
|
||||
hi := node.GetHostInfo()
|
||||
if contains(hi.RequestTags, alias) {
|
||||
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -580,10 +605,10 @@ func expandAlias(
|
||||
}
|
||||
|
||||
// if alias is a user
|
||||
nodes := filterMachinesByUser(machines, alias)
|
||||
nodes = excludeCorrectlyTaggedNodes(aclPolicy, nodes, alias, stripEmailDomain)
|
||||
filteredNodes := filterNodesByUser(nodes, alias)
|
||||
filteredNodes = excludeCorrectlyTaggedNodes(aclPolicy, filteredNodes, alias, stripEmailDomain)
|
||||
|
||||
for _, n := range nodes {
|
||||
for _, n := range filteredNodes {
|
||||
ips = append(ips, n.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
if len(ips) > 0 {
|
||||
@@ -592,19 +617,40 @@ func expandAlias(
|
||||
|
||||
// if alias is an host
|
||||
if h, ok := aclPolicy.Hosts[alias]; ok {
|
||||
return []string{h.String()}, nil
|
||||
log.Trace().Str("host", h.String()).Msg("expandAlias got hosts entry")
|
||||
|
||||
return expandAlias(filteredNodes, aclPolicy, h.String(), stripEmailDomain)
|
||||
}
|
||||
|
||||
// if alias is an IP
|
||||
ip, err := netip.ParseAddr(alias)
|
||||
if err == nil {
|
||||
return []string{ip.String()}, nil
|
||||
if ip, err := netip.ParseAddr(alias); err == nil {
|
||||
log.Trace().Str("ip", ip.String()).Msg("expandAlias got ip")
|
||||
ips := []string{ip.String()}
|
||||
matches := nodes.FilterByIP(ip)
|
||||
|
||||
for _, node := range matches {
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
|
||||
return lo.Uniq(ips), nil
|
||||
}
|
||||
|
||||
// if alias is an CIDR
|
||||
cidr, err := netip.ParsePrefix(alias)
|
||||
if err == nil {
|
||||
return []string{cidr.String()}, nil
|
||||
if cidr, err := netip.ParsePrefix(alias); err == nil {
|
||||
log.Trace().Str("cidr", cidr.String()).Msg("expandAlias got cidr")
|
||||
val := []string{cidr.String()}
|
||||
// This is suboptimal and quite expensive, but if we only add the cidr, we will miss all the relevant IPv6
|
||||
// addresses for the hosts that belong to tailscale. This doesnt really affect stuff like subnet routers.
|
||||
for _, node := range nodes {
|
||||
for _, ip := range node.IPAddresses {
|
||||
// log.Trace().
|
||||
// Msgf("checking if node ip (%s) is part of cidr (%s): %v, is single ip cidr (%v), addr: %s", ip.String(), cidr.String(), cidr.Contains(ip), cidr.IsSingleIP(), cidr.Addr().String())
|
||||
if cidr.Contains(ip) {
|
||||
val = append(val, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lo.Uniq(val), nil
|
||||
}
|
||||
|
||||
log.Warn().Msgf("No IPs found with the alias %v", alias)
|
||||
@@ -617,11 +663,11 @@ func expandAlias(
|
||||
// we assume in this function that we only have nodes from 1 user.
|
||||
func excludeCorrectlyTaggedNodes(
|
||||
aclPolicy ACLPolicy,
|
||||
nodes []Machine,
|
||||
nodes []Node,
|
||||
user string,
|
||||
stripEmailDomain bool,
|
||||
) []Machine {
|
||||
out := []Machine{}
|
||||
) []Node {
|
||||
out := []Node{}
|
||||
tags := []string{}
|
||||
for tag := range aclPolicy.TagOwners {
|
||||
owners, _ := expandTagOwners(aclPolicy, user, stripEmailDomain)
|
||||
@@ -630,9 +676,9 @@ func excludeCorrectlyTaggedNodes(
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
}
|
||||
// for each machine if tag is in tags list, don't append it.
|
||||
for _, machine := range nodes {
|
||||
hi := machine.GetHostInfo()
|
||||
// for each node if tag is in tags list, don't append it.
|
||||
for _, node := range nodes {
|
||||
hi := node.GetHostInfo()
|
||||
|
||||
found := false
|
||||
for _, t := range hi.RequestTags {
|
||||
@@ -642,11 +688,11 @@ func excludeCorrectlyTaggedNodes(
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(machine.ForcedTags) > 0 {
|
||||
if len(node.ForcedTags) > 0 {
|
||||
found = true
|
||||
}
|
||||
if !found {
|
||||
out = append(out, machine)
|
||||
out = append(out, node)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -666,6 +712,7 @@ func expandPorts(portsStr string, needsWildcard bool) (*[]tailcfg.PortRange, err
|
||||
|
||||
ports := []tailcfg.PortRange{}
|
||||
for _, portStr := range strings.Split(portsStr, ",") {
|
||||
log.Trace().Msgf("parsing portstring: %s", portStr)
|
||||
rang := strings.Split(portStr, "-")
|
||||
switch len(rang) {
|
||||
case 1:
|
||||
@@ -700,11 +747,11 @@ func expandPorts(portsStr string, needsWildcard bool) (*[]tailcfg.PortRange, err
|
||||
return &ports, nil
|
||||
}
|
||||
|
||||
func filterMachinesByUser(machines []Machine, user string) []Machine {
|
||||
out := []Machine{}
|
||||
for _, machine := range machines {
|
||||
if machine.User.Name == user {
|
||||
out = append(out, machine)
|
||||
func filterNodesByUser(nodes []Node, user string) []Node {
|
||||
out := []Node{}
|
||||
for _, node := range nodes {
|
||||
if node.User.Name == user {
|
||||
out = append(out, node)
|
||||
}
|
||||
}
|
||||
|
||||
|
362 acls_test.go
@@ -54,7 +54,7 @@ func (s *Suite) TestBasicRule(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules([]Node{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
}
|
||||
@@ -83,27 +83,27 @@ func (s *Suite) TestSshRules(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
_, err = app.GetNode("user1", "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
hostInfo := tailcfg.Hostinfo{
|
||||
OS: "centos",
|
||||
Hostname: "testmachine",
|
||||
Hostname: "testnode",
|
||||
RequestTags: []string{"tag:test"},
|
||||
}
|
||||
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
Hostname: "testnode",
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
Groups: Groups{
|
||||
@@ -193,27 +193,27 @@ func (s *Suite) TestValidExpandTagOwnersInSources(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
_, err = app.GetNode("user1", "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
hostInfo := tailcfg.Hostinfo{
|
||||
OS: "centos",
|
||||
Hostname: "testmachine",
|
||||
Hostname: "testnode",
|
||||
RequestTags: []string{"tag:test"},
|
||||
}
|
||||
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
Hostname: "testnode",
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
Groups: Groups{"group:test": []string{"user1", "user2"}},
|
||||
@@ -243,27 +243,27 @@ func (s *Suite) TestValidExpandTagOwnersInDestinations(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
_, err = app.GetNode("user1", "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
hostInfo := tailcfg.Hostinfo{
|
||||
OS: "centos",
|
||||
Hostname: "testmachine",
|
||||
Hostname: "testnode",
|
||||
RequestTags: []string{"tag:test"},
|
||||
}
|
||||
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 1,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
Hostname: "testnode",
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
Groups: Groups{"group:test": []string{"user1", "user2"}},
|
||||
@@ -293,27 +293,27 @@ func (s *Suite) TestInvalidTagValidUser(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "testmachine")
|
||||
_, err = app.GetNode("user1", "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
hostInfo := tailcfg.Hostinfo{
|
||||
OS: "centos",
|
||||
Hostname: "testmachine",
|
||||
Hostname: "testnode",
|
||||
RequestTags: []string{"tag:foo"},
|
||||
}
|
||||
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 1,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
Hostname: "testnode",
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
TagOwners: TagOwners{"tag:test": []string{"user1"}},
|
||||
@@ -342,7 +342,7 @@ func (s *Suite) TestValidTagInvalidUser(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("user1", "webserver")
|
||||
_, err = app.GetNode("user1", "webserver")
|
||||
c.Assert(err, check.NotNil)
|
||||
hostInfo := tailcfg.Hostinfo{
|
||||
OS: "centos",
|
||||
@@ -350,38 +350,38 @@ func (s *Suite) TestValidTagInvalidUser(c *check.C) {
|
||||
RequestTags: []string{"tag:webapp"},
|
||||
}
|
||||
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 1,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "webserver",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.1")},
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
_, err = app.GetMachine("user1", "user")
|
||||
app.db.Save(&node)
|
||||
_, err = app.GetNode("user1", "user")
|
||||
hostInfo2 := tailcfg.Hostinfo{
|
||||
OS: "debian",
|
||||
Hostname: "Hostname",
|
||||
}
|
||||
c.Assert(err, check.NotNil)
|
||||
machine = Machine{
|
||||
node = Node{
|
||||
ID: 2,
|
||||
MachineKey: "56789",
|
||||
NodeKey: "bar2",
|
||||
DiscoKey: "faab",
|
||||
Hostname: "user",
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.2")},
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.2")},
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: HostInfo(hostInfo2),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
app.aclPolicy = &ACLPolicy{
|
||||
TagOwners: TagOwners{"tag:webapp": []string{"user1"}},
|
||||
@@ -411,7 +411,7 @@ func (s *Suite) TestPortRange(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules([]Node{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -425,7 +425,7 @@ func (s *Suite) TestProtocolParsing(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_protocols.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules([]Node{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -439,7 +439,7 @@ func (s *Suite) TestPortWildcard(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules([]Node{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -455,7 +455,7 @@ func (s *Suite) TestPortWildcardYAML(c *check.C) {
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.yaml")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules([]Machine{}, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules([]Node{}, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -474,31 +474,31 @@ func (s *Suite) TestPortUser(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("testuser", "testmachine")
|
||||
_, err = app.GetNode("testuser", "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
ips, _ := app.getAvailableIPs()
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 0,
|
||||
MachineKey: "12345",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: ips,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
err = app.LoadACLPolicy(
|
||||
"./tests/acls/acl_policy_basic_user_as_user.hujson",
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machines, err := app.ListMachines()
|
||||
nodes, err := app.ListNodes()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules(machines, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules(nodes, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -519,29 +519,29 @@ func (s *Suite) TestPortGroup(c *check.C) {
|
||||
pak, err := app.CreatePreAuthKey(user.Name, false, false, nil, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine("testuser", "testmachine")
|
||||
_, err = app.GetNode("testuser", "testnode")
|
||||
c.Assert(err, check.NotNil)
|
||||
ips, _ := app.getAvailableIPs()
|
||||
machine := Machine{
|
||||
node := Node{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Hostname: "testmachine",
|
||||
Hostname: "testnode",
|
||||
UserID: user.ID,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: ips,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
app.db.Save(&node)
|
||||
|
||||
err = app.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machines, err := app.ListMachines()
|
||||
nodes, err := app.ListNodes()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := generateACLRules(machines, *app.aclPolicy, false)
|
||||
rules, err := generateACLRules(nodes, *app.aclPolicy, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
@@ -843,47 +843,47 @@ func Test_expandPorts(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_listMachinesInUser(t *testing.T) {
|
||||
func Test_listNodesInUser(t *testing.T) {
|
||||
type args struct {
|
||||
machines []Machine
|
||||
user string
|
||||
nodes []Node
|
||||
user string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []Machine
|
||||
want []Node
|
||||
}{
|
||||
{
|
||||
name: "1 machine in user",
|
||||
name: "1 node in user",
|
||||
args: args{
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{User: User{Name: "joe"}},
|
||||
},
|
||||
user: "joe",
|
||||
},
|
||||
want: []Machine{
|
||||
want: []Node{
|
||||
{User: User{Name: "joe"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "3 machines, 2 in user",
|
||||
name: "3 nodes, 2 in user",
|
||||
args: args{
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{ID: 1, User: User{Name: "joe"}},
|
||||
{ID: 2, User: User{Name: "marc"}},
|
||||
{ID: 3, User: User{Name: "marc"}},
|
||||
},
|
||||
user: "marc",
|
||||
},
|
||||
want: []Machine{
|
||||
want: []Node{
|
||||
{ID: 2, User: User{Name: "marc"}},
|
||||
{ID: 3, User: User{Name: "marc"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "5 machines, 0 in user",
|
||||
name: "5 nodes, 0 in user",
|
||||
args: args{
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{ID: 1, User: User{Name: "joe"}},
|
||||
{ID: 2, User: User{Name: "marc"}},
|
||||
{ID: 3, User: User{Name: "marc"}},
|
||||
@@ -892,16 +892,16 @@ func Test_listMachinesInUser(t *testing.T) {
|
||||
},
|
||||
user: "mickael",
|
||||
},
|
||||
want: []Machine{},
|
||||
want: []Node{},
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
if got := filterMachinesByUser(test.args.machines, test.args.user); !reflect.DeepEqual(
|
||||
if got := filterNodesByUser(test.args.nodes, test.args.user); !reflect.DeepEqual(
|
||||
got,
|
||||
test.want,
|
||||
) {
|
||||
t.Errorf("listMachinesInUser() = %v, want %v", got, test.want)
|
||||
t.Errorf("listNodesInUser() = %v, want %v", got, test.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -909,7 +909,7 @@ func Test_listMachinesInUser(t *testing.T) {
|
||||
|
||||
func Test_expandAlias(t *testing.T) {
|
||||
type args struct {
|
||||
machines []Machine
|
||||
nodes []Node
|
||||
aclPolicy ACLPolicy
|
||||
alias string
|
||||
stripEmailDomain bool
|
||||
@@ -924,10 +924,10 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "wildcard",
|
||||
args: args{
|
||||
alias: "*",
|
||||
machines: []Machine{
|
||||
{IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.1")}},
|
||||
nodes: []Node{
|
||||
{IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.1")}},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.78.84.227"),
|
||||
},
|
||||
},
|
||||
@@ -942,27 +942,27 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "simple group",
|
||||
args: args{
|
||||
alias: "group:accountant",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
@@ -980,27 +980,27 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "wrong group",
|
||||
args: args{
|
||||
alias: "group:hr",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
@@ -1018,7 +1018,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "simple ipaddress",
|
||||
args: args{
|
||||
alias: "10.0.0.3",
|
||||
machines: []Machine{},
|
||||
nodes: []Node{},
|
||||
aclPolicy: ACLPolicy{},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
@@ -1026,36 +1026,77 @@ func Test_expandAlias(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "private network",
|
||||
args: args{
|
||||
alias: "homeNetwork",
|
||||
machines: []Machine{},
|
||||
aclPolicy: ACLPolicy{
|
||||
Hosts: Hosts{
|
||||
"homeNetwork": netip.MustParsePrefix("192.168.1.0/24"),
|
||||
},
|
||||
},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []string{"192.168.1.0/24"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "simple host by ip",
|
||||
name: "simple host by ip passed through",
|
||||
args: args{
|
||||
alias: "10.0.0.1",
|
||||
machines: []Machine{},
|
||||
nodes: []Node{},
|
||||
aclPolicy: ACLPolicy{},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []string{"10.0.0.1"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "simple host by ipv4 single ipv4",
|
||||
args: args{
|
||||
alias: "10.0.0.1",
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("10.0.0.1"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
},
|
||||
},
|
||||
aclPolicy: ACLPolicy{},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []string{"10.0.0.1"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "simple host by ipv4 single dual stack",
|
||||
args: args{
|
||||
alias: "10.0.0.1",
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("10.0.0.1"),
|
||||
netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
},
|
||||
},
|
||||
aclPolicy: ACLPolicy{},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []string{"10.0.0.1", "fd7a:115c:a1e0:ab12:4843:2222:6273:2222"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "simple host by ipv6 single dual stack",
|
||||
args: args{
|
||||
alias: "fd7a:115c:a1e0:ab12:4843:2222:6273:2222",
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("10.0.0.1"),
|
||||
netip.MustParseAddr("fd7a:115c:a1e0:ab12:4843:2222:6273:2222"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
},
|
||||
},
|
||||
aclPolicy: ACLPolicy{},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []string{"fd7a:115c:a1e0:ab12:4843:2222:6273:2222", "10.0.0.1"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "simple host by hostname alias",
|
||||
args: args{
|
||||
alias: "testy",
|
||||
machines: []Machine{},
|
||||
alias: "testy",
|
||||
nodes: []Node{},
|
||||
aclPolicy: ACLPolicy{
|
||||
Hosts: Hosts{
|
||||
"testy": netip.MustParsePrefix("10.0.0.132/32"),
|
||||
@@ -1066,11 +1107,26 @@ func Test_expandAlias(t *testing.T) {
|
||||
want: []string{"10.0.0.132/32"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "private network",
|
||||
args: args{
|
||||
alias: "homeNetwork",
|
||||
nodes: []Node{},
|
||||
aclPolicy: ACLPolicy{
|
||||
Hosts: Hosts{
|
||||
"homeNetwork": netip.MustParsePrefix("192.168.1.0/24"),
|
||||
},
|
||||
},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []string{"192.168.1.0/24"},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "simple CIDR",
|
||||
args: args{
|
||||
alias: "10.0.0.0/16",
|
||||
machines: []Machine{},
|
||||
nodes: []Node{},
|
||||
aclPolicy: ACLPolicy{},
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
@@ -1081,9 +1137,9 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "simple tag",
|
||||
args: args{
|
||||
alias: "tag:hr-webserver",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1094,7 +1150,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1105,13 +1161,13 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1129,27 +1185,27 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "No tag defined",
|
||||
args: args{
|
||||
alias: "tag:hr-webserver",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
@@ -1170,29 +1226,29 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "Forced tag defined",
|
||||
args: args{
|
||||
alias: "tag:hr-webserver",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
ForcedTags: []string{"tag:hr-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
ForcedTags: []string{"tag:hr-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
@@ -1208,16 +1264,16 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "Forced tag with legitimate tagOwner",
|
||||
args: args{
|
||||
alias: "tag:hr-webserver",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
ForcedTags: []string{"tag:hr-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1228,13 +1284,13 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "mickael"},
|
||||
@@ -1254,9 +1310,9 @@ func Test_expandAlias(t *testing.T) {
|
||||
name: "list host in user without correctly tagged servers",
|
||||
args: args{
|
||||
alias: "joe",
|
||||
machines: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1267,7 +1323,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1278,13 +1334,13 @@ func Test_expandAlias(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.3"),
|
||||
},
|
||||
User: User{Name: "marc"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1302,7 +1358,7 @@ func Test_expandAlias(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
got, err := expandAlias(
|
||||
test.args.machines,
|
||||
test.args.nodes,
|
||||
test.args.aclPolicy,
|
||||
test.args.alias,
|
||||
test.args.stripEmailDomain,
|
||||
@@ -1322,14 +1378,14 @@ func Test_expandAlias(t *testing.T) {
|
||||
func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
type args struct {
|
||||
aclPolicy ACLPolicy
|
||||
nodes []Machine
|
||||
nodes []Node
|
||||
user string
|
||||
stripEmailDomain bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []Machine
|
||||
want []Node
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
@@ -1338,9 +1394,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
aclPolicy: ACLPolicy{
|
||||
TagOwners: TagOwners{"tag:accountant-webserver": []string{"joe"}},
|
||||
},
|
||||
nodes: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1351,7 +1407,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1362,7 +1418,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1371,9 +1427,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
user: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
want: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1389,9 +1445,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
"tag:accountant-webserver": []string{"group:accountant"},
|
||||
},
|
||||
},
|
||||
nodes: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1402,7 +1458,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1413,7 +1469,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1422,9 +1478,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
user: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
want: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1435,9 +1491,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
aclPolicy: ACLPolicy{
|
||||
TagOwners: TagOwners{"tag:accountant-webserver": []string{"joe"}},
|
||||
},
|
||||
nodes: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1448,14 +1504,14 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
ForcedTags: []string{"tag:accountant-webserver"},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1464,9 +1520,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
user: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
want: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
IPAddresses: NodeAddresses{netip.MustParseAddr("100.64.0.4")},
|
||||
User: User{Name: "joe"},
|
||||
},
|
||||
},
|
||||
@@ -1477,9 +1533,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
aclPolicy: ACLPolicy{
|
||||
TagOwners: TagOwners{"tag:accountant-webserver": []string{"joe"}},
|
||||
},
|
||||
nodes: []Machine{
|
||||
nodes: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1490,7 +1546,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1501,7 +1557,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1510,9 +1566,9 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
user: "joe",
|
||||
stripEmailDomain: true,
|
||||
},
|
||||
want: []Machine{
|
||||
want: []Node{
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.1"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1523,7 +1579,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.2"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
@@ -1534,7 +1590,7 @@ func Test_excludeCorrectlyTaggedNodes(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
IPAddresses: MachineAddresses{
|
||||
IPAddresses: NodeAddresses{
|
||||
netip.MustParseAddr("100.64.0.4"),
|
||||
},
|
||||
User: User{Name: "joe"},
|
||||
|
@@ -51,7 +51,7 @@ type AutoApprovers struct {
	ExitNode []string `json:"exitNode" yaml:"exitNode"`
}

// SSH controls who can ssh into which machines.
// SSH controls who can ssh into which nodes.
type SSH struct {
	Action string `json:"action" yaml:"action"`
	Sources []string `json:"src" yaml:"src"`
6 api.go
@@ -20,7 +20,7 @@ const (
	RegisterMethodOIDC = "oidc"
	RegisterMethodCLI = "cli"
	ErrRegisterMethodCLIDoesNotSupportExpire = Error(
		"machines registered with CLI does not support expire",
		"node registered with CLI does not support expire",
	)
)

@@ -74,9 +74,9 @@ var registerWebAPITemplate = template.Must(
	</head>
	<body>
	<h1>headscale</h1>
	<h2>Machine registration</h2>
	<h2>Node registration</h2>
	<p>
		Run the command below in the headscale server to add this machine to your network:
		Run the command below in the headscale server to add this node to your network:
	</p>
	<pre><code>headscale nodes register --user USERNAME --key {{.Key}}</code></pre>
	</body>
@@ -9,13 +9,13 @@ import (
|
||||
|
||||
func (h *Headscale) generateMapResponse(
|
||||
mapRequest tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
node *Node,
|
||||
) (*tailcfg.MapResponse, error) {
|
||||
log.Trace().
|
||||
Str("func", "generateMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
Str("node", mapRequest.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := h.toNode(*machine, h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
tailNode, err := h.toNode(*node, h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
@@ -26,7 +26,7 @@ func (h *Headscale) generateMapResponse(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
peers, err := h.getValidPeers(machine)
|
||||
peers, err := h.getValidPeers(node)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
@@ -37,7 +37,7 @@ func (h *Headscale) generateMapResponse(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profiles := h.getMapResponseUserProfiles(*machine, peers)
|
||||
profiles := h.getMapResponseUserProfiles(*node, peers)
|
||||
|
||||
nodePeers, err := h.toNodes(peers, h.cfg.BaseDomain, h.cfg.DNSConfig)
|
||||
if err != nil {
|
||||
@@ -53,7 +53,7 @@ func (h *Headscale) generateMapResponse(
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
h.cfg.DNSConfig,
|
||||
h.cfg.BaseDomain,
|
||||
*machine,
|
||||
*node,
|
||||
peers,
|
||||
)
|
||||
|
||||
@@ -61,7 +61,7 @@ func (h *Headscale) generateMapResponse(
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Node: tailNode,
|
||||
|
||||
// TODO: Only send if updated
|
||||
DERPMap: h.DERPMap,
|
||||
@@ -105,7 +105,7 @@ func (h *Headscale) generateMapResponse(
|
||||
|
||||
log.Trace().
|
||||
Str("func", "generateMapResponse").
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
Str("node", mapRequest.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
|
57 app.go
@@ -87,7 +87,7 @@ type Headscale struct {
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules []tailcfg.FilterRule
|
||||
aclPeerCacheMapRW sync.RWMutex
|
||||
aclPeerCacheMap map[string]map[string]struct{}
|
||||
aclPeerCacheMap map[string][]string
|
||||
sshPolicy *tailcfg.SSHPolicy
|
||||
|
||||
lastStateChange *xsync.MapOf[string, time.Time]
|
||||
@@ -211,7 +211,7 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
|
||||
http.Redirect(w, req, target, http.StatusFound)
|
||||
}
|
||||
|
||||
// expireEphemeralNodes deletes ephemeral machine records that have not been
|
||||
// expireEphemeralNodes deletes ephemeral node records that have not been
|
||||
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout.
|
||||
func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
@@ -220,12 +220,12 @@ func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
|
||||
}
|
||||
}
|
||||
|
||||
// expireExpiredMachines expires machines that have an explicit expiry set
|
||||
// expireExpiredNodes expires node that have an explicit expiry set
|
||||
// after that expiry time has passed.
|
||||
func (h *Headscale) expireExpiredMachines(milliSeconds int64) {
|
||||
func (h *Headscale) expireExpiredNodes(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
h.expireExpiredMachinesWorker()
|
||||
h.expireExpiredNodesWorker()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -248,32 +248,32 @@ func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
machines, err := h.ListMachinesByUser(user.Name)
|
||||
nodes, err := h.ListNodesByUser(user.Name)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("user", user.Name).
|
||||
Msg("Error listing machines in user")
|
||||
Msg("Error listing nodes in user")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
expiredFound := false
|
||||
for _, machine := range machines {
|
||||
if machine.isEphemeral() && machine.LastSeen != nil &&
|
||||
for _, node := range nodes {
|
||||
if node.isEphemeral() && node.LastSeen != nil &&
|
||||
time.Now().
|
||||
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
After(node.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
expiredFound = true
|
||||
log.Info().
|
||||
Str("machine", machine.Hostname).
|
||||
Str("node", node.Hostname).
|
||||
Msg("Ephemeral client removed from database")
|
||||
|
||||
err = h.db.Unscoped().Delete(machine).Error
|
||||
err = h.db.Unscoped().Delete(node).Error
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
Str("node", node.Hostname).
|
||||
Msg("Cannot delete ephemeral node from the database")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -284,7 +284,7 @@ func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) expireExpiredMachinesWorker() {
|
||||
func (h *Headscale) expireExpiredNodesWorker() {
|
||||
users, err := h.ListUsers()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error listing users")
|
||||
@@ -293,34 +293,34 @@ func (h *Headscale) expireExpiredMachinesWorker() {
|
||||
}
|
||||
|
||||
for _, user := range users {
|
||||
machines, err := h.ListMachinesByUser(user.Name)
|
||||
nodes, err := h.ListNodesByUser(user.Name)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("user", user.Name).
|
||||
Msg("Error listing machines in user")
|
||||
Msg("Error listing nodes in user")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
expiredFound := false
|
||||
for index, machine := range machines {
|
||||
if machine.isExpired() &&
|
||||
machine.Expiry.After(h.getLastStateChange(user)) {
|
||||
for index, node := range nodes {
|
||||
if node.isExpired() &&
|
||||
node.Expiry.After(h.getLastStateChange(user)) {
|
||||
expiredFound = true
|
||||
|
||||
err := h.ExpireMachine(&machines[index])
|
||||
err := h.ExpireNode(&nodes[index])
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Hostname).
|
||||
Str("name", machine.GivenName).
|
||||
Msg("🤮 Cannot expire machine")
|
||||
Str("node", node.Hostname).
|
||||
Str("name", node.GivenName).
|
||||
Msg("Cannot expire node")
|
||||
} else {
|
||||
log.Info().
|
||||
Str("machine", machine.Hostname).
|
||||
Str("name", machine.GivenName).
|
||||
Msg("Machine successfully expired")
|
||||
Str("node", node.Hostname).
|
||||
Str("name", node.GivenName).
|
||||
Msg("Node successfully expired")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -552,7 +552,7 @@ func (h *Headscale) Serve() error {
|
||||
}
|
||||
|
||||
go h.expireEphemeralNodes(updateInterval)
|
||||
go h.expireExpiredMachines(updateInterval)
|
||||
go h.expireExpiredNodes(updateInterval)
|
||||
|
||||
go h.failoverSubnetRoutes(updateInterval)
|
||||
|
||||
@@ -820,7 +820,6 @@ func (h *Headscale) Serve() error {
|
||||
|
||||
// And we're done:
|
||||
cancel()
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -76,6 +76,12 @@ jobs:
with:
name: logs
path: "control_logs/*.log"

- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"
`),
)
)
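Once a run has finished, the `pprof` tarball uploaded here can be pulled down next to the `logs` artifact; a quick sketch using the GitHub CLI, where the run id and output directory are placeholders:

```shell
# Download the pprof artifact from a finished integration-test run.
gh run download <RUN-ID> --name pprof --dir ./control_logs
```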
|
@@ -57,7 +57,7 @@ var debugCmd = &cobra.Command{
|
||||
|
||||
var createNodeCmd = &cobra.Command{
|
||||
Use: "create-node",
|
||||
Short: "Create a node (machine) that can be registered with `nodes register <>` command",
|
||||
Short: "Create a node that can be registered with `nodes register <>` command",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -83,7 +83,7 @@ var createNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
nodeKey, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -93,7 +93,7 @@ var createNodeCmd = &cobra.Command{
|
||||
|
||||
return
|
||||
}
|
||||
if !headscale.NodePublicKeyRegex.Match([]byte(machineKey)) {
|
||||
if !headscale.NodePublicKeyRegex.Match([]byte(nodeKey)) {
|
||||
err = errPreAuthKeyMalformed
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -115,24 +115,24 @@ var createNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
request := &v1.DebugCreateMachineRequest{
|
||||
Key: machineKey,
|
||||
request := &v1.DebugCreateNodeRequest{
|
||||
Key: nodeKey,
|
||||
Name: name,
|
||||
User: user,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
response, err := client.DebugCreateMachine(ctx, request)
|
||||
response, err := client.DebugCreateNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create machine: %s", status.Convert(err).Message()),
|
||||
fmt.Sprintf("Cannot create node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine created", output)
|
||||
SuccessOutput(response.Node, "Node created", output)
|
||||
},
|
||||
}
|
||||
|
@@ -107,7 +107,7 @@ var nodeCmd = &cobra.Command{
|
||||
|
||||
var registerNodeCmd = &cobra.Command{
|
||||
Use: "register",
|
||||
Short: "Registers a machine to your network",
|
||||
Short: "Registers a node to your network",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
user, err := cmd.Flags().GetString("user")
|
||||
@@ -132,12 +132,12 @@ var registerNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
request := &v1.RegisterMachineRequest{
|
||||
request := &v1.RegisterNodeRequest{
|
||||
Key: machineKey,
|
||||
User: user,
|
||||
}
|
||||
|
||||
response, err := client.RegisterMachine(ctx, request)
|
||||
response, err := client.RegisterNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -152,8 +152,8 @@ var registerNodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
SuccessOutput(
|
||||
response.Machine,
|
||||
fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
|
||||
response.Node,
|
||||
fmt.Sprintf("Node %s registered", response.Node.GivenName), output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -180,11 +180,11 @@ var listNodesCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListMachinesRequest{
|
||||
request := &v1.ListNodesRequest{
|
||||
User: user,
|
||||
}
|
||||
|
||||
response, err := client.ListMachines(ctx, request)
|
||||
response, err := client.ListNodes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -196,12 +196,12 @@ var listNodesCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Machines, "", output)
|
||||
SuccessOutput(response.Nodes, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData, err := nodesToPtables(user, showTags, response.Machines)
|
||||
tableData, err := nodesToPtables(user, showTags, response.Nodes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
@@ -244,11 +244,11 @@ var expireNodeCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireMachineRequest{
|
||||
MachineId: identifier,
|
||||
request := &v1.ExpireNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
response, err := client.ExpireMachine(ctx, request)
|
||||
response, err := client.ExpireNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -262,7 +262,7 @@ var expireNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine expired", output)
|
||||
SuccessOutput(response.Node, "Node expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -291,12 +291,12 @@ var renameNodeCmd = &cobra.Command{
|
||||
if len(args) > 0 {
|
||||
newName = args[0]
|
||||
}
|
||||
request := &v1.RenameMachineRequest{
|
||||
MachineId: identifier,
|
||||
NewName: newName,
|
||||
request := &v1.RenameNodeRequest{
|
||||
NodeId: identifier,
|
||||
NewName: newName,
|
||||
}
|
||||
|
||||
response, err := client.RenameMachine(ctx, request)
|
||||
response, err := client.RenameNode(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -310,7 +310,7 @@ var renameNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine renamed", output)
|
||||
SuccessOutput(response.Node, "Node renamed", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -336,11 +336,11 @@ var deleteNodeCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
getRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
getResponse, err := client.GetMachine(ctx, getRequest)
|
||||
getResponse, err := client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -354,8 +354,8 @@ var deleteNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
deleteRequest := &v1.DeleteMachineRequest{
|
||||
MachineId: identifier,
|
||||
deleteRequest := &v1.DeleteNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
confirm := false
|
||||
@@ -364,7 +364,7 @@ var deleteNodeCmd = &cobra.Command{
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetMachine().Name,
|
||||
getResponse.GetNode().Name,
|
||||
),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
@@ -374,7 +374,7 @@ var deleteNodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
response, err := client.DeleteMachine(ctx, deleteRequest)
|
||||
response, err := client.DeleteNode(ctx, deleteRequest)
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
@@ -436,11 +436,11 @@ var moveNodeCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
getRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
getRequest := &v1.GetNodeRequest{
|
||||
NodeId: identifier,
|
||||
}
|
||||
|
||||
_, err = client.GetMachine(ctx, getRequest)
|
||||
_, err = client.GetNode(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -454,12 +454,12 @@ var moveNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
moveRequest := &v1.MoveMachineRequest{
|
||||
MachineId: identifier,
|
||||
User: user,
|
||||
moveRequest := &v1.MoveNodeRequest{
|
||||
NodeId: identifier,
|
||||
User: user,
|
||||
}
|
||||
|
||||
moveResponse, err := client.MoveMachine(ctx, moveRequest)
|
||||
moveResponse, err := client.MoveNode(ctx, moveRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
@@ -473,14 +473,14 @@ var moveNodeCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(moveResponse.Machine, "Node moved to another user", output)
|
||||
SuccessOutput(moveResponse.Node, "Node moved to another user", output)
|
||||
},
|
||||
}
|
||||
|
||||
func nodesToPtables(
|
||||
currentUser string,
|
||||
showTags bool,
|
||||
machines []*v1.Machine,
|
||||
nodes []*v1.Node,
|
||||
) (pterm.TableData, error) {
|
||||
tableHeader := []string{
|
||||
"ID",
|
||||
@@ -505,23 +505,23 @@ func nodesToPtables(
|
||||
}
|
||||
tableData := pterm.TableData{tableHeader}
|
||||
|
||||
for _, machine := range machines {
|
||||
for _, node := range nodes {
|
||||
var ephemeral bool
|
||||
if machine.PreAuthKey != nil && machine.PreAuthKey.Ephemeral {
|
||||
if node.PreAuthKey != nil && node.PreAuthKey.Ephemeral {
|
||||
ephemeral = true
|
||||
}
|
||||
|
||||
var lastSeen time.Time
|
||||
var lastSeenTime string
|
||||
if machine.LastSeen != nil {
|
||||
lastSeen = machine.LastSeen.AsTime()
|
||||
if node.LastSeen != nil {
|
||||
lastSeen = node.LastSeen.AsTime()
|
||||
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var expiry time.Time
|
||||
var expiryTime string
|
||||
if machine.Expiry != nil {
|
||||
expiry = machine.Expiry.AsTime()
|
||||
if node.Expiry != nil {
|
||||
expiry = node.Expiry.AsTime()
|
||||
expiryTime = expiry.Format("2006-01-02 15:04:05")
|
||||
} else {
|
||||
expiryTime = "N/A"
|
||||
@@ -529,7 +529,7 @@ func nodesToPtables(
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText(
|
||||
[]byte(headscale.MachinePublicKeyEnsurePrefix(machine.MachineKey)),
|
||||
[]byte(headscale.MachinePublicKeyEnsurePrefix(node.MachineKey)),
|
||||
)
|
||||
if err != nil {
|
||||
machineKey = key.MachinePublic{}
|
||||
@@ -537,14 +537,14 @@ func nodesToPtables(
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
err = nodeKey.UnmarshalText(
|
||||
[]byte(headscale.NodePublicKeyEnsurePrefix(machine.NodeKey)),
|
||||
[]byte(headscale.NodePublicKeyEnsurePrefix(node.NodeKey)),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var online string
|
||||
if machine.Online {
|
||||
if node.Online {
|
||||
online = pterm.LightGreen("online")
|
||||
} else {
|
||||
online = pterm.LightRed("offline")
|
||||
@@ -558,36 +558,36 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
var forcedTags string
|
||||
for _, tag := range machine.ForcedTags {
|
||||
for _, tag := range node.ForcedTags {
|
||||
forcedTags += "," + tag
|
||||
}
|
||||
forcedTags = strings.TrimLeft(forcedTags, ",")
|
||||
var invalidTags string
|
||||
for _, tag := range machine.InvalidTags {
|
||||
if !contains(machine.ForcedTags, tag) {
|
||||
for _, tag := range node.InvalidTags {
|
||||
if !contains(node.ForcedTags, tag) {
|
||||
invalidTags += "," + pterm.LightRed(tag)
|
||||
}
|
||||
}
|
||||
invalidTags = strings.TrimLeft(invalidTags, ",")
|
||||
var validTags string
|
||||
for _, tag := range machine.ValidTags {
|
||||
if !contains(machine.ForcedTags, tag) {
|
||||
for _, tag := range node.ValidTags {
|
||||
if !contains(node.ForcedTags, tag) {
|
||||
validTags += "," + pterm.LightGreen(tag)
|
||||
}
|
||||
}
|
||||
validTags = strings.TrimLeft(validTags, ",")
|
||||
|
||||
var user string
|
||||
if currentUser == "" || (currentUser == machine.User.Name) {
|
||||
user = pterm.LightMagenta(machine.User.Name)
|
||||
if currentUser == "" || (currentUser == node.User.Name) {
|
||||
user = pterm.LightMagenta(node.User.Name)
|
||||
} else {
|
||||
// Shared into this user
|
||||
user = pterm.LightYellow(machine.User.Name)
|
||||
user = pterm.LightYellow(node.User.Name)
|
||||
}
|
||||
|
||||
var IPV4Address string
|
||||
var IPV6Address string
|
||||
for _, addr := range machine.IpAddresses {
|
||||
for _, addr := range node.IpAddresses {
|
||||
if netip.MustParseAddr(addr).Is4() {
|
||||
IPV4Address = addr
|
||||
} else {
|
||||
@@ -596,9 +596,9 @@ func nodesToPtables(
|
||||
}
|
||||
|
||||
nodeData := []string{
|
||||
strconv.FormatUint(machine.Id, headscale.Base10),
|
||||
machine.Name,
|
||||
machine.GetGivenName(),
|
||||
strconv.FormatUint(node.Id, headscale.Base10),
|
||||
node.Name,
|
||||
node.GetGivenName(),
|
||||
machineKey.ShortString(),
|
||||
nodeKey.ShortString(),
|
||||
user,
|
||||
@@ -655,8 +655,8 @@ var tagCmd = &cobra.Command{
|
||||
|
||||
// Sending tags to machine
|
||||
request := &v1.SetTagsRequest{
|
||||
MachineId: identifier,
|
||||
Tags: tagsToSet,
|
||||
NodeId: identifier,
|
||||
Tags: tagsToSet,
|
||||
}
|
||||
resp, err := client.SetTags(ctx, request)
|
||||
if err != nil {
|
||||
@@ -671,8 +671,8 @@ var tagCmd = &cobra.Command{
|
||||
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetMachine(),
|
||||
"Machine updated",
|
||||
resp.GetNode(),
|
||||
"Node updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
@@ -57,11 +57,11 @@ var listRoutesCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
nodeID, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
fmt.Sprintf("Error getting node id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -74,7 +74,7 @@ var listRoutesCmd = &cobra.Command{
|
||||
|
||||
var routes []*v1.Route
|
||||
|
||||
if machineID == 0 {
|
||||
if nodeID == 0 {
|
||||
response, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
@@ -94,13 +94,13 @@ var listRoutesCmd = &cobra.Command{
|
||||
|
||||
routes = response.Routes
|
||||
} else {
|
||||
response, err := client.GetMachineRoutes(ctx, &v1.GetMachineRoutesRequest{
|
||||
MachineId: machineID,
|
||||
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
|
||||
NodeId: nodeID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get routes for machine %d: %s", machineID, status.Convert(err).Message()),
|
||||
fmt.Sprintf("Cannot get routes for node %d: %s", nodeID, status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -147,7 +147,7 @@ var enableRouteCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
fmt.Sprintf("Error getting node id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -190,7 +190,7 @@ var disableRouteCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
fmt.Sprintf("Error getting node id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -233,7 +233,7 @@ var deleteRouteCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
fmt.Sprintf("Error getting node id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -267,7 +267,7 @@ var deleteRouteCmd = &cobra.Command{
|
||||
|
||||
// routesToPtables converts the list of routes to a nice table.
|
||||
func routesToPtables(routes []*v1.Route) pterm.TableData {
|
||||
tableData := pterm.TableData{{"ID", "Machine", "Prefix", "Advertised", "Enabled", "Primary"}}
|
||||
tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}}
|
||||
|
||||
for _, route := range routes {
|
||||
var isPrimaryStr string
|
||||
@@ -286,7 +286,7 @@ func routesToPtables(routes []*v1.Route) pterm.TableData {
|
||||
tableData = append(tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(route.Id, Base10),
|
||||
route.Machine.GivenName,
|
||||
route.Node.GivenName,
|
||||
route.Prefix,
|
||||
strconv.FormatBool(route.Advertised),
|
||||
strconv.FormatBool(route.Enabled),
|
||||
|
@@ -6,11 +6,25 @@ import (

"github.com/efekarakus/termcolor"
"github.com/juanfont/headscale/cmd/headscale/cli"
"github.com/pkg/profile"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)

func main() {
if _, enableProfile := os.LookupEnv("HEADSCALE_PROFILING_ENABLED"); enableProfile {
if profilePath, ok := os.LookupEnv("HEADSCALE_PROFILING_PATH"); ok {
err := os.MkdirAll(profilePath, os.ModePerm)
if err != nil {
log.Fatal().Err(err).Msg("failed to create profiling directory")
}

defer profile.Start(profile.ProfilePath(profilePath)).Stop()
} else {
defer profile.Start().Stop()
}
}

var colors bool
switch l := termcolor.SupportLevel(os.Stderr); l {
case termcolor.Level16M:
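The two environment variables introduced above are the whole interface to the new profiling support; turning it on for a single run is just the following (the profile directory is only an example path):

```shell
# Enable pprof profiling for this invocation. The directory is created if it
# does not exist; leave HEADSCALE_PROFILING_PATH unset to fall back to the
# profiling library's default location.
HEADSCALE_PROFILING_ENABLED=1 \
HEADSCALE_PROFILING_PATH=/var/lib/headscale/pprof \
headscale serve
```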
@@ -58,7 +58,7 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
@@ -101,7 +101,7 @@ func (*Suite) TestConfigLoading(c *check.C) {
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "./db.sqlite")
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
@@ -44,9 +44,7 @@ grpc_allow_insecure: false
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
# For production:
# /var/lib/headscale/private.key
private_key_path: ./private.key
private_key_path: /var/lib/headscale/private.key

# The Noise section includes specific configuration for the
# TS2021 Noise protocol
@@ -55,10 +53,7 @@ noise:
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol. It must be different
# from the legacy private key.
#
# For production:
# private_key_path: /var/lib/headscale/noise_private.key
private_key_path: ./noise_private.key
private_key_path: /var/lib/headscale/noise_private.key

# List of IP prefixes to allocate tailaddresses from.
# Each prefix consists of either an IPv4 or IPv6 address,
@@ -137,8 +132,7 @@ node_update_check_interval: 10s
db_type: sqlite3

# For production:
# db_path: /var/lib/headscale/db.sqlite
db_path: ./db.sqlite
db_path: /var/lib/headscale/db.sqlite

# # Postgres config
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
@@ -172,8 +166,7 @@ tls_letsencrypt_hostname: ""
# Path to store certificates and metadata needed by
# letsencrypt
# For production:
# tls_letsencrypt_cache_dir: /var/lib/headscale/cache
tls_letsencrypt_cache_dir: ./cache
tls_letsencrypt_cache_dir: /var/lib/headscale/cache

# Type of ACME challenge to use, currently supported types:
# HTTP-01 or TLS-ALPN-01
@@ -263,8 +256,7 @@ dns_config:

# Unix socket used for the CLI to connect without authentication
# Note: for production you will want to set this to something like:
# unix_socket: /var/run/headscale.sock
unix_socket: ./headscale.sock
unix_socket: /var/run/headscale/headscale.sock
unix_socket_permission: "0770"
#
# headscale supports experimental OpenID connect support,
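Because the example configuration now defaults to system paths, those directories have to exist and be writable by the service account before `headscale serve` starts; a minimal sketch, assuming the `headscale` user and group that the packaging scripts further down in this change create:

```shell
# Create the configuration, state and runtime directories referenced by the
# new defaults and hand the writable ones to the headscale service account.
mkdir -p /etc/headscale /var/lib/headscale /var/run/headscale
chown headscale:headscale /var/lib/headscale /var/run/headscale
chmod 0750 /var/lib/headscale /var/run/headscale
```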
96
db.go
@@ -43,49 +43,53 @@ func (h *Headscale) initDB() error {
|
||||
|
||||
_ = db.Migrator().RenameTable("namespaces", "users")
|
||||
|
||||
// the big rename from Machine to Node
|
||||
_ = db.Migrator().RenameTable("machines", "nodes")
|
||||
_ = db.Migrator().RenameColumn(&Route{}, "machine_id", "node_id")
|
||||
|
||||
err = db.AutoMigrate(&User{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "namespace_id", "user_id")
|
||||
_ = db.Migrator().RenameColumn(&Node{}, "namespace_id", "user_id")
|
||||
_ = db.Migrator().RenameColumn(&PreAuthKey{}, "namespace_id", "user_id")
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "ip_address", "ip_addresses")
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "name", "hostname")
|
||||
_ = db.Migrator().RenameColumn(&Node{}, "ip_address", "ip_addresses")
|
||||
_ = db.Migrator().RenameColumn(&Node{}, "name", "hostname")
|
||||
|
||||
// GivenName is used as the primary source of DNS names, make sure
|
||||
// the field is populated and normalized if it was not when the
|
||||
// machine was registered.
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "nickname", "given_name")
|
||||
// node was registered.
|
||||
_ = db.Migrator().RenameColumn(&Node{}, "nickname", "given_name")
|
||||
|
||||
// If the Machine table has a column for registered,
|
||||
// If the Node table has a column for registered,
|
||||
// find all occourences of "false" and drop them. Then
|
||||
// remove the column.
|
||||
if db.Migrator().HasColumn(&Machine{}, "registered") {
|
||||
if db.Migrator().HasColumn(&Node{}, "registered") {
|
||||
log.Info().
|
||||
Msg(`Database has legacy "registered" column in machine, removing...`)
|
||||
Msg(`Database has legacy "registered" column in node, removing...`)
|
||||
|
||||
machines := Machines{}
|
||||
if err := h.db.Not("registered").Find(&machines).Error; err != nil {
|
||||
nodes := Nodes{}
|
||||
if err := h.db.Not("registered").Find(&nodes).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for _, machine := range machines {
|
||||
for _, node := range nodes {
|
||||
log.Info().
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Deleting unregistered machine")
|
||||
if err := h.db.Delete(&Machine{}, machine.ID).Error; err != nil {
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", node.MachineKey).
|
||||
Msg("Deleting unregistered node")
|
||||
if err := h.db.Delete(&Node{}, node.ID).Error; err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Error deleting unregistered machine")
|
||||
Str("node", node.Hostname).
|
||||
Str("machine_key", node.MachineKey).
|
||||
Msg("Error deleting unregistered node")
|
||||
}
|
||||
}
|
||||
|
||||
err := db.Migrator().DropColumn(&Machine{}, "registered")
|
||||
err := db.Migrator().DropColumn(&Node{}, "registered")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error dropping registered column")
|
||||
}
|
||||
@@ -96,21 +100,21 @@ func (h *Headscale) initDB() error {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "enabled_routes") {
|
||||
log.Info().Msgf("Database has legacy enabled_routes column in machine, migrating...")
|
||||
if db.Migrator().HasColumn(&Node{}, "enabled_routes") {
|
||||
log.Info().Msgf("Database has legacy enabled_routes column in node, migrating...")
|
||||
|
||||
type MachineAux struct {
|
||||
type NodeAux struct {
|
||||
ID uint64
|
||||
EnabledRoutes IPPrefixes
|
||||
}
|
||||
|
||||
machinesAux := []MachineAux{}
|
||||
err := db.Table("machines").Select("id, enabled_routes").Scan(&machinesAux).Error
|
||||
nodesAux := []NodeAux{}
|
||||
err := db.Table("nodes").Select("id, enabled_routes").Scan(&nodesAux).Error
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
for _, machine := range machinesAux {
|
||||
for _, prefix := range machine.EnabledRoutes {
|
||||
for _, node := range nodesAux {
|
||||
for _, prefix := range node.EnabledRoutes {
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
@@ -120,8 +124,8 @@ func (h *Headscale) initDB() error {
|
||||
continue
|
||||
}
|
||||
|
||||
err = db.Preload("Machine").
|
||||
Where("machine_id = ? AND prefix = ?", machine.ID, IPPrefix(prefix)).
|
||||
err = db.Preload("Node").
|
||||
Where("node_id = ? AND prefix = ?", node.ID, IPPrefix(prefix)).
|
||||
First(&Route{}).
|
||||
Error
|
||||
if err == nil {
|
||||
@@ -133,7 +137,7 @@ func (h *Headscale) initDB() error {
|
||||
}
|
||||
|
||||
route := Route{
|
||||
MachineID: machine.ID,
|
||||
NodeID: node.ID,
|
||||
Advertised: true,
|
||||
Enabled: true,
|
||||
Prefix: IPPrefix(prefix),
|
||||
@@ -142,51 +146,51 @@ func (h *Headscale) initDB() error {
|
||||
log.Error().Err(err).Msg("Error creating route")
|
||||
} else {
|
||||
log.Info().
|
||||
Uint64("machine_id", route.MachineID).
|
||||
Uint64("node_id", route.NodeID).
|
||||
Str("prefix", prefix.String()).
|
||||
Msg("Route migrated")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = db.Migrator().DropColumn(&Machine{}, "enabled_routes")
|
||||
err = db.Migrator().DropColumn(&Node{}, "enabled_routes")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error dropping enabled_routes column")
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
err = db.AutoMigrate(&Node{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "given_name") {
|
||||
machines := Machines{}
|
||||
if err := h.db.Find(&machines).Error; err != nil {
|
||||
if db.Migrator().HasColumn(&Node{}, "given_name") {
|
||||
nodes := Nodes{}
|
||||
if err := h.db.Find(&nodes).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for item, machine := range machines {
|
||||
if machine.GivenName == "" {
|
||||
for item, node := range nodes {
|
||||
if node.GivenName == "" {
|
||||
normalizedHostname, err := NormalizeToFQDNRules(
|
||||
machine.Hostname,
|
||||
node.Hostname,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Str("hostname", node.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to normalize machine hostname in DB migration")
|
||||
Msg("Failed to normalize node hostname in DB migration")
|
||||
}
|
||||
|
||||
err = h.RenameMachine(&machines[item], normalizedHostname)
|
||||
err = h.RenameNode(&nodes[item], normalizedHostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Str("hostname", node.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to save normalized machine name in DB migration")
|
||||
Msg("Failed to save normalized node name in DB migration")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -324,7 +328,7 @@ func (hi *HostInfo) Scan(destination interface{}) error {
|
||||
return json.Unmarshal([]byte(value), hi)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -370,7 +374,7 @@ func (i *IPPrefixes) Scan(destination interface{}) error {
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -392,7 +396,7 @@ func (i *StringList) Scan(destination interface{}) error {
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrNodeAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
|
22
dns.go
@@ -159,22 +159,22 @@ func generateIPv6DNSRootDomain(ipPrefix netip.Prefix) []dnsname.FQDN {
|
||||
}
|
||||
|
||||
// If any nextdns DoH resolvers are present in the list of resolvers it will
|
||||
// take metadata from the machine metadata and instruct tailscale to add it
|
||||
// take metadata from the node metadata and instruct tailscale to add it
|
||||
// to the requests. This makes it possible to identify from which device the
|
||||
// requests come in the NextDNS dashboard.
|
||||
//
|
||||
// This will produce a resolver like:
|
||||
// `https://dns.nextdns.io/<nextdns-id>?device_name=node-name&device_model=linux&device_ip=100.64.0.1`
|
||||
func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine Machine) {
|
||||
func addNextDNSMetadata(resolvers []*dnstype.Resolver, node Node) {
|
||||
for _, resolver := range resolvers {
|
||||
if strings.HasPrefix(resolver.Addr, nextDNSDoHPrefix) {
|
||||
attrs := url.Values{
|
||||
"device_name": []string{machine.Hostname},
|
||||
"device_model": []string{machine.HostInfo.OS},
|
||||
"device_name": []string{node.Hostname},
|
||||
"device_model": []string{node.HostInfo.OS},
|
||||
}
|
||||
|
||||
if len(machine.IPAddresses) > 0 {
|
||||
attrs.Add("device_ip", machine.IPAddresses[0].String())
|
||||
if len(node.IPAddresses) > 0 {
|
||||
attrs.Add("device_ip", node.IPAddresses[0].String())
|
||||
}
|
||||
|
||||
resolver.Addr = fmt.Sprintf("%s?%s", resolver.Addr, attrs.Encode())
|
||||
@@ -185,8 +185,8 @@ func addNextDNSMetadata(resolvers []*dnstype.Resolver, machine Machine) {
|
||||
func getMapResponseDNSConfig(
|
||||
dnsConfigOrig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
machine Machine,
|
||||
peers Machines,
|
||||
node Node,
|
||||
peers Nodes,
|
||||
) *tailcfg.DNSConfig {
|
||||
var dnsConfig *tailcfg.DNSConfig = dnsConfigOrig.Clone()
|
||||
if dnsConfigOrig != nil && dnsConfigOrig.Proxied { // if MagicDNS is enabled
|
||||
@@ -195,13 +195,13 @@ func getMapResponseDNSConfig(
|
||||
dnsConfig.Domains,
|
||||
fmt.Sprintf(
|
||||
"%s.%s",
|
||||
machine.User.Name,
|
||||
node.User.Name,
|
||||
baseDomain,
|
||||
),
|
||||
)
|
||||
|
||||
userSet := mapset.NewSet[User]()
|
||||
userSet.Add(machine.User)
|
||||
userSet.Add(node.User)
|
||||
for _, p := range peers {
|
||||
userSet.Add(p.User)
|
||||
}
|
||||
@@ -213,7 +213,7 @@ func getMapResponseDNSConfig(
|
||||
dnsConfig = dnsConfigOrig
|
||||
}
|
||||
|
||||
addNextDNSMetadata(dnsConfig.Resolvers, machine)
|
||||
addNextDNSMetadata(dnsConfig.Resolvers, node)
|
||||
|
||||
return dnsConfig
|
||||
}
|
||||
|
60
dns_test.go
@@ -157,10 +157,10 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, "test_get_shared_nodes_1")
|
||||
_, err = app.GetNode(userShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
nodesInShared1 := &Node{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
@@ -172,12 +172,12 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
app.db.Save(nodesInShared1)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, machineInShared1.Hostname)
|
||||
_, err = app.GetNode(userShared1.Name, nodesInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
nodesInShared2 := &Node{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
@@ -189,12 +189,12 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
app.db.Save(nodesInShared2)
|
||||
|
||||
_, err = app.GetMachine(userShared2.Name, machineInShared2.Hostname)
|
||||
_, err = app.GetNode(userShared2.Name, nodesInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
nodesInShared3 := &Node{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
@@ -206,12 +206,12 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
app.db.Save(nodesInShared3)
|
||||
|
||||
_, err = app.GetMachine(userShared3.Name, machineInShared3.Hostname)
|
||||
_, err = app.GetNode(userShared3.Name, nodesInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
nodes2InShared1 := &Node{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
@@ -223,7 +223,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
app.db.Save(nodes2InShared1)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
@@ -232,14 +232,14 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
Proxied: true,
|
||||
}
|
||||
|
||||
peersOfMachineInShared1, err := app.getPeers(machineInShared1)
|
||||
peersOfNodeInShared1, err := app.getPeers(nodesInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachineInShared1,
|
||||
*nodesInShared1,
|
||||
peersOfNodeInShared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
|
||||
@@ -304,10 +304,10 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, "test_get_shared_nodes_1")
|
||||
_, err = app.GetNode(userShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
nodesInShared1 := &Node{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
@@ -319,12 +319,12 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
app.db.Save(nodesInShared1)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, machineInShared1.Hostname)
|
||||
_, err = app.GetNode(userShared1.Name, nodesInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
nodesInShared2 := &Node{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
@@ -336,12 +336,12 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
app.db.Save(nodesInShared2)
|
||||
|
||||
_, err = app.GetMachine(userShared2.Name, machineInShared2.Hostname)
|
||||
_, err = app.GetNode(userShared2.Name, nodesInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
nodesInShared3 := &Node{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
@@ -353,12 +353,12 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
app.db.Save(nodesInShared3)
|
||||
|
||||
_, err = app.GetMachine(userShared3.Name, machineInShared3.Hostname)
|
||||
_, err = app.GetNode(userShared3.Name, nodesInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
nodes2InShared1 := &Node{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
@@ -370,7 +370,7 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
app.db.Save(nodes2InShared1)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
@@ -379,14 +379,14 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
Proxied: false,
|
||||
}
|
||||
|
||||
peersOfMachine1Shared1, err := app.getPeers(machineInShared1)
|
||||
peersOfNode1Shared1, err := app.getPeers(nodesInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachine1Shared1,
|
||||
*nodesInShared1,
|
||||
peersOfNode1Shared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 0)
|
||||
|
@@ -1,56 +0,0 @@
# headscale documentation

This page contains the official and community contributed documentation for `headscale`.

If you are having trouble with following the documentation or get unexpected results,
please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Issue.

## Official documentation

### How-to

- [Running headscale on Linux](running-headscale-linux.md)
- [Control headscale remotely](remote-cli.md)
- [Using a Windows client with headscale](windows-client.md)
- [Configuring OIDC](oidc.md)

### References

- [Configuration](../config-example.yaml)
- [Glossary](glossary.md)
- [TLS](tls.md)

## Community documentation

Community documentation is not actively maintained by the headscale authors and is
written by community members. It is _not_ verified by `headscale` developers.

**It might be outdated and it might miss necessary steps**.

- [Running headscale in a container](running-headscale-container.md)
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
- [Running headscale behind a reverse proxy](reverse-proxy.md)
- [Set Custom DNS records](dns-records.md)

## Misc

### Policy ACLs

Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.

For instance, instead of referring to users when defining groups you must
use users (which are the equivalent to user/logins in Tailscale.com).

Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.

When using ACL's the User borders are no longer applied. All machines
whichever the User have the ability to communicate with other hosts as
long as the ACL's permits this exchange.

The [ACLs](acls.md) document should help understand a fictional case of setting
up ACLs in a small company. All concepts presented in this document could be
applied outside of business oriented usage.

### Apple devices

An endpoint with information on how to connect your Apple devices (currently macOS only) is available at `/apple` on your running instance.
|
13
docs/acls.md
@@ -1,4 +1,15 @@
# ACLs use case example
Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.

For instance, instead of referring to users when defining groups you must
use users (which are the equivalent to user/logins in Tailscale.com).

Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.

When using ACL's the User borders are no longer applied. All machines
whichever the User have the ability to communicate with other hosts as
long as the ACL's permits this exchange.

## ACLs use case example

Let's build an example use case for a small business (It may be the place where
ACL's are the most useful).
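To make the policy prose above concrete, a minimal policy in the Tailscale-style HuJSON format the page points to might look like the following; the file path, group name and port list are made-up illustrations, not values taken from this change:

```shell
# Hypothetical sketch: a small ACL letting an "admins" group reach everything
# while every other node may only reach HTTP/HTTPS.
cat > /etc/headscale/acl.hujson <<'EOF'
{
  "groups": {
    "group:admins": ["myfirstuser"]
  },
  "acls": [
    { "action": "accept", "src": ["group:admins"], "dst": ["*:*"] },
    { "action": "accept", "src": ["*"], "dst": ["*:80,443"] }
  ]
}
EOF
```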
|
||||
|
@@ -1,5 +1,12 @@
|
||||
# Setting custom DNS records
|
||||
|
||||
!!! warning "Community documentation"
|
||||
|
||||
This page is not actively maintained by the headscale authors and is
|
||||
written by community members. It is _not_ verified by `headscale` developers.
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing how a user can set custom DNS records with `headscale`s magic dns.
|
||||
|
@@ -12,6 +12,11 @@ Ensure that the installed version is at least 1.38.1, as that is the first relea
## Configuring the headscale URL

!!! info "Apple devices"

An endpoint with information on how to connect your Apple devices
(currently macOS only) is available at `/apple` on your running instance.

Ensure that the tailscale app is logged out before proceeding.

Go to iOS settings, scroll down past game center and tv provider to the tailscale app and select it. The headscale URL can be entered into the _"ALTERNATE COORDINATION SERVER URL"_ box.
12
docs/index.md
Normal file
@@ -0,0 +1,12 @@
---
hide:
- navigation
- toc
---

# headscale documentation

This site contains the official and community contributed documentation for `headscale`.

If you are having trouble with following the documentation or get unexpected results,
please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Issue.
5
docs/packaging/README.md
Normal file
@@ -0,0 +1,5 @@
# Packaging

We use [nFPM](https://nfpm.goreleaser.com/) for making `.deb`, `.rpm` and `.apk`.

This folder contains files we need to package with these releases.
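For a rough picture of how these files get consumed, an nFPM build is driven by a config file plus a one-line invocation; the config file name and target directory below are assumptions, and the exact flags should be double-checked against nFPM's documentation:

```shell
# Hypothetical sketch: build a .deb from an nfpm.yaml that references the
# systemd unit and the post-install/post-remove scripts in this folder.
nfpm package --config nfpm.yaml --packager deb --target ./dist/
```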
52
docs/packaging/headscale.systemd.service
Normal file
@@ -0,0 +1,52 @@
[Unit]
After=syslog.target
After=network.target
Description=headscale coordination server for Tailscale
X-Restart-Triggers=/etc/headscale/config.yaml

[Service]
Type=simple
User=headscale
Group=headscale
ExecStart=/usr/bin/headscale serve
Restart=always
RestartSec=5

WorkingDirectory=/var/lib/headscale
ReadWritePaths=/var/lib/headscale /var/run

AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN
LockPersonality=true
NoNewPrivileges=true
PrivateDevices=true
PrivateMounts=true
PrivateTmp=true
ProcSubset=pid
ProtectClock=true
ProtectControlGroups=true
ProtectHome=true
ProtectHome=yes
ProtectHostname=true
ProtectKernelLogs=true
ProtectKernelModules=true
ProtectKernelTunables=true
ProtectProc=invisible
ProtectSystem=strict
RemoveIPC=true
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=true
RestrictRealtime=true
RestrictSUIDSGID=true
RuntimeDirectory=headscale
RuntimeDirectoryMode=0750
StateDirectory=headscale
StateDirectoryMode=0750
SystemCallArchitectures=native
SystemCallFilter=@chown
SystemCallFilter=@system-service
SystemCallFilter=~@privileged
UMask=0077

[Install]
WantedBy=multi-user.target
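Once the package has installed this unit, bringing the service up is the usual systemd sequence, with the same effect as the two commands the post-install script prints in its summary:

```shell
# Pick up the freshly installed unit file, then enable and start headscale.
sudo systemctl daemon-reload
sudo systemctl enable --now headscale
sudo systemctl status headscale
```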
86
docs/packaging/postinstall.sh
Normal file
@@ -0,0 +1,86 @@
|
||||
#!/bin/sh
|
||||
# Determine OS platform
|
||||
# shellcheck source=/dev/null
|
||||
. /etc/os-release
|
||||
|
||||
HEADSCALE_EXE="/usr/bin/headscale"
|
||||
BSD_HIER=""
|
||||
HEADSCALE_RUN_DIR="/var/run/headscale"
|
||||
HEADSCALE_USER="headscale"
|
||||
HEADSCALE_GROUP="headscale"
|
||||
|
||||
ensure_sudo() {
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
echo "Sudo permissions detected"
|
||||
else
|
||||
echo "No sudo permission detected, please run as sudo"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
ensure_headscale_path() {
|
||||
if [ ! -f "$HEADSCALE_EXE" ]; then
|
||||
echo "headscale not in default path, exiting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
printf "Found headscale %s\n" "$HEADSCALE_EXE"
|
||||
}
|
||||
|
||||
create_headscale_user() {
|
||||
printf "PostInstall: Adding headscale user %s\n" "$HEADSCALE_USER"
|
||||
useradd -s /bin/sh -c "headscale default user" headscale
|
||||
}
|
||||
|
||||
create_headscale_group() {
|
||||
if command -V systemctl >/dev/null 2>&1; then
|
||||
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||
groupadd "$HEADSCALE_GROUP"
|
||||
|
||||
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
usermod -a -G "$HEADSCALE_GROUP" "$HEADSCALE_USER"
|
||||
fi
|
||||
|
||||
if [ "$ID" = "alpine" ]; then
|
||||
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
|
||||
addgroup "$HEADSCALE_GROUP"
|
||||
|
||||
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
addgroup "$HEADSCALE_USER" "$HEADSCALE_GROUP"
|
||||
fi
|
||||
}
|
||||
|
||||
create_run_dir() {
|
||||
printf "PostInstall: Creating headscale run directory \n"
|
||||
mkdir -p "$HEADSCALE_RUN_DIR"
|
||||
|
||||
printf "PostInstall: Modifying group ownership of headscale run directory \n"
|
||||
chown "$HEADSCALE_USER":"$HEADSCALE_GROUP" "$HEADSCALE_RUN_DIR"
|
||||
}
|
||||
|
||||
summary() {
|
||||
echo "----------------------------------------------------------------------"
|
||||
echo " headscale package has been successfully installed."
|
||||
echo ""
|
||||
echo " Please follow the next steps to start the software:"
|
||||
echo ""
|
||||
echo " sudo systemctl enable headscale"
|
||||
echo " sudo systemctl start headscale"
|
||||
echo ""
|
||||
echo " Configuration settings can be adjusted here:"
|
||||
echo " ${BSD_HIER}/etc/headscale/config.yaml"
|
||||
echo ""
|
||||
echo "----------------------------------------------------------------------"
|
||||
}
|
||||
|
||||
#
|
||||
# Main body of the script
|
||||
#
|
||||
{
|
||||
ensure_sudo
|
||||
ensure_headscale_path
|
||||
create_headscale_user
|
||||
create_headscale_group
|
||||
create_run_dir
|
||||
summary
|
||||
}
|
15
docs/packaging/postremove.sh
Normal file
@@ -0,0 +1,15 @@
#!/bin/sh
# Determine OS platform
# shellcheck source=/dev/null
. /etc/os-release

if command -V systemctl >/dev/null 2>&1; then
echo "Stop and disable headscale service"
systemctl stop headscale >/dev/null 2>&1 || true
systemctl disable headscale >/dev/null 2>&1 || true
echo "Running daemon-reload"
systemctl daemon-reload || true
fi

echo "Removing run directory"
rm -rf "/var/run/headscale.sock"
|
@@ -1,5 +1,12 @@
# Running headscale behind a reverse proxy

!!! warning "Community documentation"

This page is not actively maintained by the headscale authors and is
written by community members. It is _not_ verified by `headscale` developers.

**It might be outdated and it might miss necessary steps**.

Running headscale behind a reverse proxy is useful when running multiple applications on the same server, and you want to reuse the same external IP and port - usually tcp/443 for HTTPS.

### WebSockets
|
||||
|
@@ -1,7 +1,11 @@
# Running headscale in a container

**Note:** the container documentation is maintained by the _community_ and there is no guarentee
it is up to date, or working.
!!! warning "Community documentation"

This page is not actively maintained by the headscale authors and is
written by community members. It is _not_ verified by `headscale` developers.

**It might be outdated and it might miss necessary steps**.

## Goal

@@ -24,7 +28,7 @@ cd ./headscale
touch ./config/db.sqlite
```

3. **(Strongly Recommended)** Download a copy of the [example configuration](../config-example.yaml) from the [headscale repository](https://github.com/juanfont/headscale/).
3. **(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.

Using wget:
|
||||
|
||||
|
198
docs/running-headscale-linux-manual.md
Normal file
198
docs/running-headscale-linux-manual.md
Normal file
@@ -0,0 +1,198 @@
|
||||
# Running headscale on Linux
|
||||
|
||||
## Note: Outdated and "advanced"
|
||||
|
||||
This documentation is considered the "legacy"/advanced/manual version of the documentation, you most likely do not
|
||||
want to use this documentation and rather look at the distro specific documentation (TODO LINK)[].
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how-to set up and run `headscale` on Linux.
|
||||
In additional to the "get up and running section", there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd)
|
||||
describing how to make `headscale` run properly in a server environment.
|
||||
|
||||
## Configure and run `headscale`
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
```shell
|
||||
wget --output-document=/usr/local/bin/headscale \
|
||||
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
|
||||
```
|
||||
|
||||
2. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
3. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
# Directory for configuration
|
||||
|
||||
mkdir -p /etc/headscale
|
||||
|
||||
# Directory for Database, and other variable data (like certificates)
|
||||
mkdir -p /var/lib/headscale
|
||||
# or if you create a headscale user:
|
||||
useradd \
|
||||
--create-home \
|
||||
--home-dir /var/lib/headscale/ \
|
||||
--system \
|
||||
--user-group \
|
||||
--shell /usr/bin/nologin \
|
||||
headscale
|
||||
```
|
||||
|
||||
4. Create an empty SQLite database:

   ```shell
   touch /var/lib/headscale/db.sqlite
   ```

5. Create a `headscale` configuration:

   ```shell
   touch /etc/headscale/config.yaml
   ```

   **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.

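   For example, the example configuration can be fetched directly into place (a sketch; the raw URL is assumed to track the `main` branch and may change):

   ```shell
   # Replace the empty config with the upstream example configuration
   wget --output-document=/etc/headscale/config.yaml \
     https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
   ```
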
6. Start the headscale server:

   ```shell
   headscale serve
   ```

   This command will start `headscale` in the current terminal session.

   ---

   To continue the tutorial, open a new terminal and let it run in the background.
   Alternatively, use a terminal multiplexer like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/), as sketched below.

   To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing.

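   For instance, with tmux the server can be kept running in a detached session (a sketch; assumes tmux is installed, and the session name is arbitrary):

   ```shell
   # Start headscale in a detached tmux session named "headscale"
   tmux new-session -d -s headscale 'headscale serve'

   # Re-attach later to inspect its output
   tmux attach -t headscale
   ```
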
7. Verify `headscale` is running and the metrics endpoint is available:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```

8. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

   ```shell
   headscale users create myfirstuser
   ```

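   To confirm the user was created, list the users known to this `headscale` instance (output columns may differ between versions):

   ```shell
   # Show all users registered on this headscale instance
   headscale users list
   ```
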
### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:

```shell
tailscale up --login-server YOUR_HEADSCALE_URL
```

Register the machine:

```shell
headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY>
```

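Once registered, the node should appear in the node list (a quick sanity check; output format varies between releases):

```shell
# List the nodes known to headscale and the users they belong to
headscale nodes list
```
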
### Register a machine using a pre-authenticated key

Generate a key using the command line:

```shell
headscale --user myfirstuser preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

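Existing pre-authenticated keys can be listed and expired with the same subcommand (a sketch; exact flags may differ between versions):

```shell
# Show the keys generated for this user
headscale --user myfirstuser preauthkeys list

# Expire a key that should no longer be usable
headscale --user myfirstuser preauthkeys expire <YOUR_AUTH_KEY>
```
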
## Running `headscale` in the background with SystemD

:warning: **Deprecated**: This part is very outdated and you should use the [pre-packaged Headscale for this](./running-headscale-linux.md).

This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/).
This should work on most modern Linux distributions.

1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing:

   ```systemd
   [Unit]
   Description=headscale controller
   After=syslog.target
   After=network.target

   [Service]
   Type=simple
   User=headscale
   Group=headscale
   ExecStart=/usr/local/bin/headscale serve
   Restart=always
   RestartSec=5

   # Optional security enhancements
   NoNewPrivileges=yes
   PrivateTmp=yes
   ProtectSystem=strict
   ProtectHome=yes
   WorkingDirectory=/var/lib/headscale
   ReadWritePaths=/var/lib/headscale /var/run/headscale
   AmbientCapabilities=CAP_NET_BIND_SERVICE
   RuntimeDirectory=headscale

   [Install]
   WantedBy=multi-user.target
   ```

   Note that when running as the headscale user, ensure that you either add your current user to the headscale group:

   ```shell
   usermod -a -G headscale current_user
   ```

   or run all headscale commands as the headscale user:

   ```shell
   su - headscale
   ```

2. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the `headscale` user or group:

   ```yaml
   unix_socket: /var/run/headscale/headscale.sock
   ```

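   A quick way to confirm the socket path and permissions work is to run a read-only CLI command as the service user once the service is up (a sketch; assumes the dedicated `headscale` user created earlier):

   ```shell
   # If this lists nodes without a permission error, the socket is usable
   sudo -u headscale headscale nodes list
   ```
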
3. Reload SystemD to load the new configuration file:

   ```shell
   systemctl daemon-reload
   ```

4. Enable and start the new `headscale` service:

   ```shell
   systemctl enable --now headscale
   ```

5. Verify the headscale service:

   ```shell
   systemctl status headscale
   ```

   Verify `headscale` is available:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```

`headscale` will now run in the background and start at boot.

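If the service does not come up, the unit's logs are usually the quickest way to find out why (standard SystemD tooling, not specific to headscale):

```shell
# Follow the headscale service logs
journalctl --unit headscale --follow
```
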
@@ -1,84 +1,65 @@
# Running headscale on Linux

## Requirements

- Ubuntu 20.04 or newer, Debian 11 or newer.

## Goal

This documentation has the goal of showing a user how to set up and run `headscale` on Linux.
In addition to the "get up and running" section, there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd)
describing how to make `headscale` run properly in a server environment.
Get Headscale up and running.

## Configure and run `headscale`
This includes running Headscale with SystemD.

1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
## Migrating from manual install

If you are migrating from the old manual install, the best thing would be to remove
the files installed by following [the guide in reverse](./running-headscale-linux-manual.md).

You should _not_ delete the database (`/var/headscale/db.sqlite`) and the
configuration (`/etc/headscale/config.yaml`).

## Installation

1. Download the latest Headscale package for your platform (`.deb` for Ubuntu and Debian) from [Headscale's releases page](https://github.com/juanfont/headscale/releases):

```shell
wget --output-document=/usr/local/bin/headscale \
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
wget --output-document=headscale.deb \
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>.deb
```

2. Make `headscale` executable:
2. Install Headscale:

```shell
chmod +x /usr/local/bin/headscale
sudo dpkg --install headscale.deb
```

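If you prefer to have package dependencies resolved automatically, the same file can usually be installed through apt instead of dpkg (a sketch; the leading `./` makes apt treat it as a local package):

```shell
# Install the local .deb and pull in any missing dependencies
sudo apt install ./headscale.deb
```
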
3. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
3. Enable the Headscale service; this will start Headscale at boot:

```shell
# Directory for configuration
mkdir -p /etc/headscale

# Directory for the database and other variable data (like certificates)
mkdir -p /var/lib/headscale
# or if you create a headscale user:
useradd \
--create-home \
--home-dir /var/lib/headscale/ \
--system \
--user-group \
--shell /usr/bin/nologin \
headscale
sudo systemctl enable headscale
```

4. Create an empty SQLite database:
4. Configure Headscale by editing the configuration file:

```shell
touch /var/lib/headscale/db.sqlite
nano /etc/headscale/config.yaml
```

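At a minimum you will usually want to review the server address settings. A quick way to see their current values (a sketch; the key names are taken from the example configuration and may change between releases):

```shell
# Show the most commonly edited settings in the headscale config
grep --extended-regexp '^(server_url|listen_addr|metrics_listen_addr):' /etc/headscale/config.yaml
```
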
5. Create a `headscale` configuration:
5. Start Headscale:

```shell
touch /etc/headscale/config.yaml
sudo systemctl start headscale
```

It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
from the [headscale repository](../)

6. Start the headscale server:
6. Check that Headscale is running as intended:

```shell
headscale serve
systemctl status headscale
```

This command will start `headscale` in the current terminal session.
## Using Headscale

---

To continue the tutorial, open a new terminal and let it run in the background.
Alternatively use terminal emulators like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/).

To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing.

7. Verify `headscale` is running:

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

8. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
### Create a user

```shell
headscale users create myfirstuser

@@ -86,16 +67,16 @@ headscale users create myfirstuser

### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:
On a client machine, run the `tailscale` login command:

```shell
tailscale up --login-server YOUR_HEADSCALE_URL
tailscale up --login-server <YOUR_HEADSCALE_URL>
```

Register the machine:

```shell
headscale --user myfirstuser nodes register --key <YOU_+MACHINE_KEY>
headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY>
```

### Register a machine using a pre-authenticated key

@@ -106,87 +87,9 @@ Generate a key using the command line:
headscale --user myfirstuser preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:
This will return a pre-authenticated key that is used to
connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

## Running `headscale` in the background with SystemD

This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/).
This should work on most modern Linux distributions.

1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing:

```systemd
[Unit]
Description=headscale controller
After=syslog.target
After=network.target

[Service]
Type=simple
User=headscale
Group=headscale
ExecStart=/usr/local/bin/headscale serve
Restart=always
RestartSec=5

# Optional security enhancements
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=yes
WorkingDirectory=/var/lib/headscale
ReadWritePaths=/var/lib/headscale /var/run/headscale
AmbientCapabilities=CAP_NET_BIND_SERVICE
RuntimeDirectory=headscale

[Install]
WantedBy=multi-user.target
```

Note that when running as the headscale user, ensure that you either add your current user to the headscale group:

```shell
usermod -a -G headscale current_user
```

or run all headscale commands as the headscale user:

```shell
su - headscale
```

2. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the `headscale` user or group:

```yaml
unix_socket: /var/run/headscale/headscale.sock
```

3. Reload SystemD to load the new configuration file:

```shell
systemctl daemon-reload
```

4. Enable and start the new `headscale` service:

```shell
systemctl enable --now headscale
```

5. Verify the headscale service:

```shell
systemctl status headscale
```

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

`headscale` will now run in the background and start at boot.

@@ -1,5 +1,12 @@
# Running headscale on OpenBSD

!!! warning "Community documentation"

    This page is not actively maintained by the headscale authors and is
    written by community members. It is _not_ verified by `headscale` developers.

    **It might be outdated and it might miss necessary steps**.

## Goal

This documentation has the goal of showing a user how to install and run `headscale` on OpenBSD 7.1.

@@ -43,7 +50,7 @@ cp headscale /usr/local/sbin

```shell
# Install prerequisites
# 1. go v1.19+: headscale newer than 0.17 needs go 1.19+ to compile
# 1. go v1.20+: headscale newer than 0.21 needs go 1.20+ to compile
# 2. gmake: Makefile in the headscale repo is written in GNU make syntax

git clone https://github.com/juanfont/headscale.git
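
# Not part of the original guide: on OpenBSD the prerequisites above can
# usually be installed from packages first (run as root; package names assumed):
pkg_add go gmake
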
@@ -87,8 +94,7 @@ touch /var/lib/headscale/db.sqlite
touch /etc/headscale/config.yaml
```

It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
from the [headscale repository](../)
**(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.

4. Start the headscale server:

flake.lock (generated, 32 lines)
@@ -1,12 +1,15 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
"lastModified": 1681202837,
"narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f",
"rev": "cfacdce06f30d2b68473a46042957675eebb3401",
"type": "github"
},
"original": {
@@ -17,16 +20,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1670064435,
"narHash": "sha256-+ELoY30UN+Pl3Yn7RWRPabykwebsVK/kYE9JsIsUMxQ=",
"lastModified": 1681753173,
"narHash": "sha256-MrGmzZWLUqh2VstoikKLFFIELXm/lsf/G9U9zR96VD4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "61a8a98e6d557e6dd7ed0cdb54c3a3e3bbc5e25c",
"rev": "0a4206a51b386e5cda731e8ac78d76ad924c7125",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
@@ -36,6 +39,21 @@
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",

flake.nix (21 lines)
@@ -2,7 +2,7 @@
description = "headscale - Open Source Tailscale Control server";

inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
flake-utils.url = "github:numtide/flake-utils";
};

@@ -24,7 +24,7 @@
pkgs = nixpkgs.legacyPackages.${prev.system};
in
rec {
headscale = pkgs.buildGo119Module rec {
headscale = pkgs.buildGo120Module rec {
pname = "headscale";
version = headscaleVersion;
src = pkgs.lib.cleanSource self;
@@ -36,7 +36,7 @@

# When updating go.mod or go.sum, a new sha will need to be calculated,
# update this if you have a mismatch after doing a change to those files.
vendorSha256 = "sha256-R183PDeAUnNwNV8iE3b22S5hGPJG8aZQGdENGqcPCw8=";
vendorSha256 = "sha256-5a3SR77vYp8AsVFyFhBQtHn1I2kdlph+RuAkuT6hPIs=";

ldflags = [ "-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}" ];
};
@@ -89,7 +89,7 @@
overlays = [ self.overlay ];
inherit system;
};
buildDeps = with pkgs; [ git go_1_19 gnumake ];
buildDeps = with pkgs; [ git go_1_20 gnumake ];
devDeps = with pkgs;
buildDeps
++ [
@@ -97,8 +97,13 @@
golines
nodePackages.prettier
goreleaser
nfpm
gotestsum

# 'dot' is needed for pprof graphs
# go tool pprof -http=: <source>
graphviz

# Protobuf dependencies
protobuf
protoc-gen-go
@@ -128,6 +133,14 @@

shellHook = ''
export GOFLAGS=-tags="ts2019"
export PATH="$PWD/result/bin:$PATH"

mkdir -p ./ignored
export HEADSCALE_PRIVATE_KEY_PATH="./ignored/private.key"
export HEADSCALE_NOISE_PRIVATE_KEY_PATH="./ignored/noise_private.key"
export HEADSCALE_DB_PATH="./ignored/db.sqlite"
export HEADSCALE_TLS_LETSENCRYPT_CACHE_DIR="./ignored/cache"
export HEADSCALE_UNIX_SOCKET="./ignored/headscale.sock"
'';
};

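The dev-shell hunks above are typically exercised through nix itself. A minimal sketch (assuming nix with flakes enabled; the commands are illustrative and not taken from the repository docs):

```shell
# Enter the development shell defined by flake.nix
nix develop

# Build the headscale package exposed by the flake
nix build .#headscale
```
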
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.29.1
// protoc (unknown)
// source: headscale/v1/apikey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.28.1
// protoc-gen-go v1.29.1
// protoc (unknown)
// source: headscale/v1/device.proto

Some files were not shown because too many files have changed in this diff.