Mirror of https://github.com/juanfont/headscale.git (synced 2025-08-16 10:47:41 +00:00)

Compare commits (141 commits)
SHA1 of the compared commits, newest first:

9c2a630055, 0e1ddf9715, 54da1a4155, 7141e2ed70, c9e5048015, 4e077b053c, f973aef80c, a43bb1bb40,
d86123195c, 91ffd10192, 642c7824a7, 149279f3d5, 275214920f, 0124899759, 033136cb9a, 05e08e0ac7,
226cb89d97, 3007c0ec4f, 3fa1ac9c79, bb2ccfddd9, 99fd126219, 15b8c8f4c5, 4243885246, 5bc5c5dc1b,
db4f49901e, 73a00c89ff, 8a614dabc0, c95cf15731, e7ce902f9d, d421c7b665, 1abc68ccf4, 575b15e5fa,
a8c8a358d0, cd2ca137c0, 0660867a16, b1200140b8, d10b57b317, 42bf566fff, 0bb2fabc6c, ee704f8ef3,
4aad3b7933, 6091373b53, 3879120967, 465669f650, ea615e3a26, d3349aa4d1, 73207decfd, eda6e560c3,
95de823b72, 9f85efffd5, b5841c8a8b, 309f868a21, 6c903d4a2f, c3aa9a5d4c, 4fb55e1684, 91bfb481c1,
201ba109c3, d3f965d493, f832d7325b, b1d1bd32c3, df6d4de6fd, 461a893ee4, 97f7c90092, ea3043cdcb,
04dffcc4ae, 3a07360b6e, b97d6f71b1, 4915902e04, d87a4c87cc, e56755fd67, 2862c2034b, 53185eaa9e,
b83ecc3e6e, 04fdd94201, 48ec51d166, 3260362436, 5f60671d12, 69d77f6e9d, 1af9c11bdd, 57c115e60a,
96b4d2f391, 0f649aae8b, f491db232b, 9a24340bd4, 39b756cf55, 9ca2ae7fc5, f3139d26c8, 6f20a1fc68,
243b961cbe, 5748744134, 31556e1ac0, 0159649d0a, cf9d920e4a, 7d46dfe012, eabb1ce881, db20985b06,
29b80e3ca1, a16a763283, ad7f03c9dd, bff3d2d613, f66c283756, ad454d95b9, e67a98b758, ecf258f995,
d4b27fd54b, 90e9ad9a0e, ff9d99b9ea, 7590dee1f2, 315bc6b677, a1b8f77b1b, 19443669bf, d446e8a2fb,
202d6b506f, 401e6aec32, bd86975d10, d0e970f21d, 07e95393b3, 136aab9dc8, bbd6a67c46, 31ea67bcaf,
5644dadaf9, 874aa4277d, b161a92e58, 95fee5aa6f, f5b8a3f710, ba87ade9c5, aa27709e60, 736182f651,
c4aa9d8aed, d8e0b16512, d67be9ef58, 69ba750b38, df0d214faf, 73186eeb2f, fdcd3bb574, c64d756ea7,
a63fb6b007, 27e97cbd09, 39550e262c, cfef55447f, 9c276f33bd
16	.dockerignore	(new file)
@@ -0,0 +1,16 @@
// integration tests are not needed in docker
// ignoring it let us speed up the integration test
// development
integration_test.go

Dockerfile*
docker-compose*
.dockerignore
.goreleaser.yml
.git
.github
.gitignore
README.md
LICENSE
.vscode

2	.github/workflows/release.yml	(vendored)
@@ -7,7 +7,7 @@ on:

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    runs-on: ubuntu-18.04 # due to CGO we need to use an older version
    steps:
      -
        name: Checkout
1	.gitignore	(vendored)
@@ -18,3 +18,4 @@
config.json
*.key
/db.sqlite
*.sqlite3
.goreleaser.yml
@@ -4,29 +4,65 @@ before:
hooks:
- go mod tidy
builds:
- env:
- CGO_ENABLED=0
- id: darwin-amd64
main: ./cmd/headscale/headscale.go
mod_timestamp: '{{ .CommitTimestamp }}'
goos:
- linux
- windows
- darwin
goarch:
- amd64
env:
- PKG_CONFIG_SYSROOT_DIR=/sysroot/macos/amd64
- PKG_CONFIG_PATH=/sysroot/macos/amd64/usr/local/lib/pkgconfig
- CC=o64-clang
- CXX=o64-clang++
flags:
- -mod=readonly
ldflags:
- -s -w -X main.version={{.Version}}
- id: linux-armhf
main: ./cmd/headscale/headscale.go
mod_timestamp: '{{ .CommitTimestamp }}'
goos:
- linux
goarch:
- arm
- arm64
goarm:
- 7
env:
- CC=arm-linux-gnueabihf-gcc
- CXX=arm-linux-gnueabihf-g++
- CGO_FLAGS=--sysroot=/sysroot/linux/armhf
- CGO_LDFLAGS=--sysroot=/sysroot/linux/armhf
- PKG_CONFIG_SYSROOT_DIR=/sysroot/linux/armhf
- PKG_CONFIG_PATH=/sysroot/linux/armhf/opt/vc/lib/pkgconfig:/sysroot/linux/armhf/usr/lib/arm-linux-gnueabihf/pkgconfig:/sysroot/linux/armhf/usr/lib/pkgconfig:/sysroot/linux/armhf/usr/local/lib/pkgconfig
flags:
- -mod=readonly
ldflags:
- -s -w -X main.version={{.Version}}


- id: linux-amd64
env:
- CGO_ENABLED=1
goos:
- linux
goarch:
- amd64
goarm:
- 6
- 7

main: ./cmd/headscale/headscale.go
mod_timestamp: '{{ .CommitTimestamp }}'

archives:
- replacements:
darwin: Darwin
linux: Linux
windows: Windows
amd64: x86_64
- id: golang-cross
builds:
- darwin-amd64
- linux-armhf
- linux-amd64
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
format: binary

checksum:
name_template: 'checksums.txt'
19	Dockerfile	(new file)
@@ -0,0 +1,19 @@
FROM golang:latest AS build
ENV GOPATH /go

COPY go.mod go.sum /go/src/headscale/
WORKDIR /go/src/headscale
RUN go mod download

COPY . /go/src/headscale

RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN test -e /go/bin/headscale

FROM ubuntu:latest

COPY --from=build /go/bin/headscale /usr/local/bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
CMD ["headscale"]
9	Dockerfile.tailscale	(new file)
@@ -0,0 +1,9 @@
FROM ubuntu:latest

RUN apt-get update \
    && apt-get install -y gnupg curl \
    && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
    && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
    && apt-get update \
    && apt-get install -y tailscale \
    && rm -rf /var/lib/apt/lists/*
3	Makefile
@@ -9,6 +9,9 @@ dev: lint test build
test:
	@go test -coverprofile=coverage.out ./...

test_integration:
	go test -tags integration -timeout 30m ./...

coverprofile_func:
	go tool cover -func=coverage.out

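The `test_integration` target added above relies on Go build tags: `go test -tags integration` compiles test files that a plain `go test ./...` (the `test` target) skips, and the new `.dockerignore` keeps them out of the Docker build context. As a rough, hypothetical sketch of how such a tag-guarded file is typically structured (the actual `integration_test.go` is not shown in this diff):

```go
//go:build integration
// +build integration

package headscale

import "testing"

// TestNodesCanReachEachOther is a placeholder name, not a test from the repo:
// this file compiles only when the `integration` tag is set, so the normal
// `make test` run never executes it, while
// `go test -tags integration -timeout 30m ./...` does.
func TestNodesCanReachEachOther(t *testing.T) {
	t.Skip("illustrative only: the real integration tests drive the official Tailscale clients")
}
```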
88	README.md
@@ -1,8 +1,8 @@

# Headscale

[](https://gitter.im/headscale-dev/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

An open source implementation of the Tailscale coordination server.
An open source, self-hosted implementation of the Tailscale coordination server.

## Overview

@@ -10,28 +10,28 @@ Tailscale is [a modern VPN](https://tailscale.com/) built on top of [Wireguard](

Everything in Tailscale is Open Source, except the GUI clients for proprietary OS (Windows and macOS/iOS), and the 'coordination/control server'.

The control server works as an exchange point of cryptographic public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.
The control server works as an exchange point of Wireguard public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.

Headscale implements this coordination server.

## Status

- [x] Basic functionality (nodes can communicate with each other)
- [x] Base functionality (nodes can communicate with each other)
- [x] Node registration through the web flow
- [x] Network changes are relayed to the nodes
- [x] ~~Multiuser~~ Namespace support
- [x] Basic routing (advertise & accept)
- [ ] Share nodes between ~~users~~ namespaces
- [x] Node registration via pre-auth keys (including reusable keys and ephemeral node support)
- [x] Namespace support (~equivalent to multi-user in Tailscale.com)
- [x] Routing (advertise & accept, including exit nodes)
- [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
- [X] JSON-formatted output
- [ ] ACLs
- [X] ACLs
- [X] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
- [ ] Share nodes between ~~users~~ namespaces
- [ ] DNS

... and probably lots of stuff missing

## Roadmap 🤷

Basic multiuser support (multinamespace, actually) is now implemented. No node sharing or ACLs between namespaces yet though...
We are now focusing on adding integration tests with the official clients.

Suggestions/PRs welcomed!

@@ -39,10 +39,8 @@ Suggestions/PRs welcomed!

## Running it

1. Compile the headscale binary
```shell
make
```
1. Download the Headscale binary from https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH

2. (Optional, you can also use SQLite) Get yourself a PostgreSQL DB running

@@ -63,33 +61,40 @@ Suggestions/PRs welcomed!
cp config.json.sqlite.example config.json
```

4. Create a namespace (equivalent to a user in tailscale.com)
4. Create a namespace (a namespace is a 'tailnet', a group of Tailscale nodes that can talk to each other)
```shell
./headscale namespace create myfirstnamespace
headscale namespaces create myfirstnamespace
```

5. Run the server
```shell
./headscale serve
headscale serve
```

6. Add your first machine
6. If you used tailscale.com before in your nodes, make sure you clear the tailscaled data folder
```shell
systemctl stop tailscaled
rm -fr /var/lib/tailscale
systemctl start tailscaled
```

7. Add your first machine
```shell
tailscale up -login-server YOUR_HEADSCALE_URL
```

7. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.
8. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.

8. In the server, register your machine to a namespace with the CLI
9. In the server, register your machine to a namespace with the CLI
```shell
./headscale -n myfirstnamespace node register YOURMACHINEKEY
headscale -n myfirstnamespace node register YOURMACHINEKEY
```

Alternatively, you can use Auth Keys to register your machines:

1. Create an authkey
```shell
./headscale -n myfirstnamespace preauthkey create --reusable --expiration 24h
headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
```

2. Use the authkey from your machine to register it

@@ -107,11 +112,17 @@ Please bear in mind that all the commands from headscale support adding `-o json
Headscale's configuration file is named `config.json` or `config.yaml`. Headscale will look for it in `/etc/headscale`, `~/.headscale` and finally the directory from where the Headscale binary is executed.

```
"server_url": "http://192.168.1.12:8000",
"listen_addr": "0.0.0.0:8000",
"server_url": "http://192.168.1.12:8080",
"listen_addr": "0.0.0.0:8080",
"ip_prefix": "100.64.0.0/10"
```

`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on.
`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on. `ip_prefix` is the IP prefix (range) from which IP addresses for nodes will be allocated (default 100.64.0.0/10; e.g. 192.168.4.0/24 or 10.0.0.0/8).

```
"log_level": "debug"
```
`log_level` can be used to set the log level for Headscale. It defaults to `debug`, and the available levels are: `trace`, `debug`, `info`, `warn` and `error`.

```
"private_key_path": "private.key",
@@ -141,6 +152,7 @@ Headscale's configuration file is named `config.json` or `config.yaml`. Headscal

The fields starting with `db_` are used for the PostgreSQL connection information.


### Running the service via TLS (optional)

```
@@ -152,17 +164,37 @@ Headscale can be configured to expose its web service via TLS. To configure the

```
"tls_letsencrypt_hostname": "",
"tls_letsencrypt_listen": ":http",
"tls_letsencrypt_cache_dir": ".cache",
"tls_letsencrypt_challenge_type": "HTTP-01",
```

To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) Headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed. The default challenge type HTTP-01 requires that Headscale listens on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, Headscale must be reachable via port 443, but port 80 is not required.
To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) Headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.

#### Challenge type HTTP-01

The default challenge type `HTTP-01` requires that Headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, Headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.

If you need to change the ip and/or port used by Headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running Headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.

#### Challenge type TLS-ALPN-01

Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, Headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.

### Policy ACLs

Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.

For instance, instead of referring to users when defining groups you must
use namespaces (which are the equivalent of users/logins in Tailscale.com).

Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.

## Disclaimer

1. We have nothing to do with Tailscale, or Tailscale Inc.
2. The purpose of writing this was to learn how Tailscale works.
3. ~~I don't use Headscale myself.~~

266	acls.go	(new file)
@@ -0,0 +1,266 @@
package headscale

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/rs/zerolog/log"

	"github.com/tailscale/hujson"
	"inet.af/netaddr"
	"tailscale.com/tailcfg"
)

const errorEmptyPolicy = Error("empty policy")
const errorInvalidAction = Error("invalid action")
const errorInvalidUserSection = Error("invalid user section")
const errorInvalidGroup = Error("invalid group")
const errorInvalidTag = Error("invalid tag")
const errorInvalidNamespace = Error("invalid namespace")
const errorInvalidPortFormat = Error("invalid port format")

// LoadACLPolicy loads the ACL policy from the specified path, and generates the ACL rules
func (h *Headscale) LoadACLPolicy(path string) error {
	policyFile, err := os.Open(path)
	if err != nil {
		return err
	}
	defer policyFile.Close()

	var policy ACLPolicy
	b, err := io.ReadAll(policyFile)
	if err != nil {
		return err
	}
	err = hujson.Unmarshal(b, &policy)
	if err != nil {
		return err
	}
	if policy.IsZero() {
		return errorEmptyPolicy
	}

	h.aclPolicy = &policy
	rules, err := h.generateACLRules()
	if err != nil {
		return err
	}
	h.aclRules = rules
	return nil
}

func (h *Headscale) generateACLRules() (*[]tailcfg.FilterRule, error) {
	rules := []tailcfg.FilterRule{}

	for i, a := range h.aclPolicy.ACLs {
		if a.Action != "accept" {
			return nil, errorInvalidAction
		}

		r := tailcfg.FilterRule{}

		srcIPs := []string{}
		for j, u := range a.Users {
			srcs, err := h.generateACLPolicySrcIP(u)
			if err != nil {
				log.Error().
					Msgf("Error parsing ACL %d, User %d", i, j)
				return nil, err
			}
			srcIPs = append(srcIPs, *srcs...)
		}
		r.SrcIPs = srcIPs

		destPorts := []tailcfg.NetPortRange{}
		for j, d := range a.Ports {
			dests, err := h.generateACLPolicyDestPorts(d)
			if err != nil {
				log.Error().
					Msgf("Error parsing ACL %d, Port %d", i, j)
				return nil, err
			}
			destPorts = append(destPorts, *dests...)
		}

		rules = append(rules, tailcfg.FilterRule{
			SrcIPs:   srcIPs,
			DstPorts: destPorts,
		})
	}

	return &rules, nil
}

func (h *Headscale) generateACLPolicySrcIP(u string) (*[]string, error) {
	return h.expandAlias(u)
}

func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRange, error) {
	tokens := strings.Split(d, ":")
	if len(tokens) < 2 || len(tokens) > 3 {
		return nil, errorInvalidPortFormat
	}

	var alias string
	// We can have here stuff like:
	// git-server:*
	// 192.168.1.0/24:22
	// tag:montreal-webserver:80,443
	// tag:api-server:443
	// example-host-1:*
	if len(tokens) == 2 {
		alias = tokens[0]
	} else {
		alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
	}

	expanded, err := h.expandAlias(alias)
	if err != nil {
		return nil, err
	}
	ports, err := h.expandPorts(tokens[len(tokens)-1])
	if err != nil {
		return nil, err
	}

	dests := []tailcfg.NetPortRange{}
	for _, d := range *expanded {
		for _, p := range *ports {
			pr := tailcfg.NetPortRange{
				IP:    d,
				Ports: p,
			}
			dests = append(dests, pr)
		}
	}
	return &dests, nil
}

func (h *Headscale) expandAlias(s string) (*[]string, error) {
	if s == "*" {
		return &[]string{"*"}, nil
	}

	if strings.HasPrefix(s, "group:") {
		if _, ok := h.aclPolicy.Groups[s]; !ok {
			return nil, errorInvalidGroup
		}
		ips := []string{}
		for _, n := range h.aclPolicy.Groups[s] {
			nodes, err := h.ListMachinesInNamespace(n)
			if err != nil {
				return nil, errorInvalidNamespace
			}
			for _, node := range *nodes {
				ips = append(ips, node.IPAddress)
			}
		}
		return &ips, nil
	}

	if strings.HasPrefix(s, "tag:") {
		if _, ok := h.aclPolicy.TagOwners[s]; !ok {
			return nil, errorInvalidTag
		}

		// This will have HORRIBLE performance.
		// We need to change the data model to better store tags
		machines := []Machine{}
		if err := h.db.Where("registered").Find(&machines).Error; err != nil {
			return nil, err
		}
		ips := []string{}
		for _, m := range machines {
			hostinfo := tailcfg.Hostinfo{}
			if len(m.HostInfo) != 0 {
				hi, err := m.HostInfo.MarshalJSON()
				if err != nil {
					return nil, err
				}
				err = json.Unmarshal(hi, &hostinfo)
				if err != nil {
					return nil, err
				}

				// FIXME: Check TagOwners allows this
				for _, t := range hostinfo.RequestTags {
					if s[4:] == t {
						ips = append(ips, m.IPAddress)
						break
					}
				}
			}
		}
		return &ips, nil
	}

	n, err := h.GetNamespace(s)
	if err == nil {
		nodes, err := h.ListMachinesInNamespace(n.Name)
		if err != nil {
			return nil, err
		}
		ips := []string{}
		for _, n := range *nodes {
			ips = append(ips, n.IPAddress)
		}
		return &ips, nil
	}

	if h, ok := h.aclPolicy.Hosts[s]; ok {
		return &[]string{h.String()}, nil
	}

	ip, err := netaddr.ParseIP(s)
	if err == nil {
		return &[]string{ip.String()}, nil
	}

	cidr, err := netaddr.ParseIPPrefix(s)
	if err == nil {
		return &[]string{cidr.String()}, nil
	}

	return nil, errorInvalidUserSection
}

func (h *Headscale) expandPorts(s string) (*[]tailcfg.PortRange, error) {
	if s == "*" {
		return &[]tailcfg.PortRange{{First: 0, Last: 65535}}, nil
	}

	ports := []tailcfg.PortRange{}
	for _, p := range strings.Split(s, ",") {
		rang := strings.Split(p, "-")
		if len(rang) == 1 {
			pi, err := strconv.ParseUint(rang[0], 10, 16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{
				First: uint16(pi),
				Last:  uint16(pi),
			})
		} else if len(rang) == 2 {
			start, err := strconv.ParseUint(rang[0], 10, 16)
			if err != nil {
				return nil, err
			}
			last, err := strconv.ParseUint(rang[1], 10, 16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{
				First: uint16(start),
				Last:  uint16(last),
			})
		} else {
			return nil, errorInvalidPortFormat
		}
	}
	return &ports, nil
}
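To make the port handling above concrete, here is a small, self-contained sketch (not part of the diff) that mirrors the `expandPorts` logic: the port part of a destination such as `example-host-1:*` or `tag:montreal-webserver:80,443` is turned into inclusive ranges. The local `portRange` type stands in for `tailcfg.PortRange`, so the snippet runs on its own:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// portRange mirrors the shape of tailcfg.PortRange used in acls.go above.
type portRange struct{ First, Last uint16 }

// expandPorts parses "*", "22", "80,443" or "5400-5500" into port ranges,
// following the same rules as the expandPorts method above.
func expandPorts(s string) ([]portRange, error) {
	if s == "*" {
		return []portRange{{First: 0, Last: 65535}}, nil
	}
	ports := []portRange{}
	for _, p := range strings.Split(s, ",") {
		bounds := strings.Split(p, "-")
		switch len(bounds) {
		case 1: // single port, e.g. "22"
			v, err := strconv.ParseUint(bounds[0], 10, 16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, portRange{uint16(v), uint16(v)})
		case 2: // range, e.g. "5400-5500"
			first, err := strconv.ParseUint(bounds[0], 10, 16)
			if err != nil {
				return nil, err
			}
			last, err := strconv.ParseUint(bounds[1], 10, 16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, portRange{uint16(first), uint16(last)})
		default:
			return nil, fmt.Errorf("invalid port format: %q", p)
		}
	}
	return ports, nil
}

func main() {
	for _, spec := range []string{"*", "22", "80,443", "5400-5500"} {
		r, err := expandPorts(spec)
		fmt.Println(spec, r, err)
	}
}
```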
160	acls_test.go	(new file)
@@ -0,0 +1,160 @@
package headscale

import (
	"gopkg.in/check.v1"
)

func (s *Suite) TestWrongPath(c *check.C) {
	err := h.LoadACLPolicy("asdfg")
	c.Assert(err, check.NotNil)
}

func (s *Suite) TestBrokenHuJson(c *check.C) {
	err := h.LoadACLPolicy("./tests/acls/broken.hujson")
	c.Assert(err, check.NotNil)

}

func (s *Suite) TestInvalidPolicyHuson(c *check.C) {
	err := h.LoadACLPolicy("./tests/acls/invalid.hujson")
	c.Assert(err, check.NotNil)
	c.Assert(err, check.Equals, errorEmptyPolicy)
}

func (s *Suite) TestParseHosts(c *check.C) {
	var hs Hosts
	err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100","example-host-2": "100.100.101.100/24"}`))
	c.Assert(hs, check.NotNil)
	c.Assert(err, check.IsNil)
}

func (s *Suite) TestParseInvalidCIDR(c *check.C) {
	var hs Hosts
	err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100/42"}`))
	c.Assert(hs, check.IsNil)
	c.Assert(err, check.NotNil)
}

func (s *Suite) TestRuleInvalidGeneration(c *check.C) {
	err := h.LoadACLPolicy("./tests/acls/acl_policy_invalid.hujson")
	c.Assert(err, check.NotNil)
}

func (s *Suite) TestBasicRule(c *check.C) {
	err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
	c.Assert(err, check.IsNil)

	rules, err := h.generateACLRules()
	c.Assert(err, check.IsNil)
	c.Assert(rules, check.NotNil)
}

func (s *Suite) TestPortRange(c *check.C) {
	err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
	c.Assert(err, check.IsNil)

	rules, err := h.generateACLRules()
	c.Assert(err, check.IsNil)
	c.Assert(rules, check.NotNil)

	c.Assert(*rules, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(5400))
	c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
}

func (s *Suite) TestPortWildcard(c *check.C) {
	err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
	c.Assert(err, check.IsNil)

	rules, err := h.generateACLRules()
	c.Assert(err, check.IsNil)
	c.Assert(rules, check.NotNil)

	c.Assert(*rules, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
	c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
	c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
	c.Assert((*rules)[0].SrcIPs[0], check.Equals, "*")
}

func (s *Suite) TestPortNamespace(c *check.C) {
	n, err := h.CreateNamespace("testnamespace")
	c.Assert(err, check.IsNil)

	pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine("testnamespace", "testmachine")
	c.Assert(err, check.NotNil)
	ip, _ := h.getAvailableIP()
	m := Machine{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Name:           "testmachine",
		NamespaceID:    n.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      ip.String(),
		AuthKeyID:      uint(pak.ID),
	}
	h.db.Save(&m)

	err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_namespace_as_user.hujson")
	c.Assert(err, check.IsNil)

	rules, err := h.generateACLRules()
	c.Assert(err, check.IsNil)
	c.Assert(rules, check.NotNil)

	c.Assert(*rules, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
	c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
	c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
	c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
	c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
}

func (s *Suite) TestPortGroup(c *check.C) {
	n, err := h.CreateNamespace("testnamespace")
	c.Assert(err, check.IsNil)

	pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine("testnamespace", "testmachine")
	c.Assert(err, check.NotNil)
	ip, _ := h.getAvailableIP()
	m := Machine{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Name:           "testmachine",
		NamespaceID:    n.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      ip.String(),
		AuthKeyID:      uint(pak.ID),
	}
	h.db.Save(&m)

	err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
	c.Assert(err, check.IsNil)

	rules, err := h.generateACLRules()
	c.Assert(err, check.IsNil)
	c.Assert(rules, check.NotNil)

	c.Assert(*rules, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
	c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
	c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
	c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
	c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
	c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
}
70	acls_types.go	(new file)
@@ -0,0 +1,70 @@
package headscale

import (
	"strings"

	"github.com/tailscale/hujson"
	"inet.af/netaddr"
)

// ACLPolicy represents a Tailscale ACL Policy
type ACLPolicy struct {
	Groups    Groups    `json:"Groups"`
	Hosts     Hosts     `json:"Hosts"`
	TagOwners TagOwners `json:"TagOwners"`
	ACLs      []ACL     `json:"ACLs"`
	Tests     []ACLTest `json:"Tests"`
}

// ACL is a basic rule for the ACL Policy
type ACL struct {
	Action string   `json:"Action"`
	Users  []string `json:"Users"`
	Ports  []string `json:"Ports"`
}

// Groups references a series of aliases in the ACL rules
type Groups map[string][]string

// Hosts are aliases for IP addresses or subnets
type Hosts map[string]netaddr.IPPrefix

// TagOwners specify what users (namespaces?) are allowed to use certain tags
type TagOwners map[string][]string

// ACLTest is not implemented, but should be used to check if a certain rule is allowed
type ACLTest struct {
	User  string   `json:"User"`
	Allow []string `json:"Allow"`
	Deny  []string `json:"Deny,omitempty"`
}

// UnmarshalJSON allows to parse the Hosts directly into netaddr objects
func (h *Hosts) UnmarshalJSON(data []byte) error {
	hosts := Hosts{}
	hs := make(map[string]string)
	err := hujson.Unmarshal(data, &hs)
	if err != nil {
		return err
	}
	for k, v := range hs {
		if !strings.Contains(v, "/") {
			v = v + "/32"
		}
		prefix, err := netaddr.ParseIPPrefix(v)
		if err != nil {
			return err
		}
		hosts[k] = prefix
	}
	*h = hosts
	return nil
}

// IsZero is perhaps a bit naive here
func (p ACLPolicy) IsZero() bool {
	if len(p.Groups) == 0 && len(p.Hosts) == 0 && len(p.ACLs) == 0 {
		return true
	}
	return false
}
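For illustration, a minimal sketch of how these types fit together: a HuJSON policy is decoded with `hujson.Unmarshal` (the same call `LoadACLPolicy` uses), and the custom `Hosts` unmarshaller widens bare IPs to `/32` prefixes. The snippet assumes it lives in the same package as the types above, and the policy content is made up rather than taken from `./tests/acls/`:

```go
package headscale

import (
	"fmt"

	"github.com/tailscale/hujson"
)

// ExamplePolicy decodes a small, made-up policy to show the Hosts
// unmarshalling behaviour: bare IPs become /32 prefixes, CIDRs stay as given.
func ExamplePolicy() {
	raw := []byte(`{
		// hujson allows comments, unlike plain JSON
		"Hosts": {
			"example-host-1": "100.100.100.100",
			"example-host-2": "100.100.101.100/24"
		},
		"ACLs": [
			{"Action": "accept", "Users": ["*"], "Ports": ["example-host-1:22,80-443"]}
		]
	}`)

	var policy ACLPolicy
	if err := hujson.Unmarshal(raw, &policy); err != nil {
		panic(err)
	}
	fmt.Println(policy.Hosts["example-host-1"]) // 100.100.100.100/32
	fmt.Println(policy.Hosts["example-host-2"]) // 100.100.101.100/24
	fmt.Println(len(policy.ACLs))               // 1
}
```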
361	api.go
@@ -3,19 +3,21 @@ package headscale
|
||||
import (
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/jinzhu/gorm"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"gorm.io/datatypes"
|
||||
"gorm.io/gorm"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/wgengine/wgcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
)
|
||||
|
||||
// KeyHandler provides the Headscale pub key
|
||||
@@ -45,7 +47,7 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
|
||||
|
||||
<p>
|
||||
<code>
|
||||
<b>headscale -n NAMESPACE node register %s</b>
|
||||
<b>headscale -n NAMESPACE nodes register %s</b>
|
||||
</code>
|
||||
</p>
|
||||
|
||||
@@ -60,60 +62,68 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
|
||||
func (h *Headscale) RegistrationHandler(c *gin.Context) {
|
||||
body, _ := io.ReadAll(c.Request.Body)
|
||||
mKeyStr := c.Param("id")
|
||||
mKey, err := wgcfg.ParseHexKey(mKeyStr)
|
||||
mKey, err := wgkey.ParseHex(mKeyStr)
|
||||
if err != nil {
|
||||
log.Printf("Cannot parse machine key: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot parse machine key")
|
||||
c.String(http.StatusInternalServerError, "Sad!")
|
||||
return
|
||||
}
|
||||
req := tailcfg.RegisterRequest{}
|
||||
err = decode(body, &req, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot decode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot decode message")
|
||||
c.String(http.StatusInternalServerError, "Very sad!")
|
||||
return
|
||||
}
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
c.String(http.StatusInternalServerError, ":(")
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
var m Machine
|
||||
if db.First(&m, "machine_key = ?", mKey.HexString()).RecordNotFound() {
|
||||
log.Println("New Machine!")
|
||||
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
log.Info().Str("machine", req.Hostinfo.Hostname).Msg("New machine")
|
||||
m = Machine{
|
||||
Expiry: &req.Expiry,
|
||||
MachineKey: mKey.HexString(),
|
||||
Name: req.Hostinfo.Hostname,
|
||||
NodeKey: wgcfg.Key(req.NodeKey).HexString(),
|
||||
NodeKey: wgkey.Key(req.NodeKey).HexString(),
|
||||
}
|
||||
if err := db.Create(&m).Error; err != nil {
|
||||
log.Printf("Could not create row: %s", err)
|
||||
if err := h.db.Create(&m).Error; err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Could not create row")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !m.Registered && req.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(c, db, mKey, req, m)
|
||||
h.handleAuthKey(c, h.db, mKey, req, m)
|
||||
return
|
||||
}
|
||||
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// We have the updated key!
|
||||
if m.NodeKey == wgcfg.Key(req.NodeKey).HexString() {
|
||||
if m.NodeKey == wgkey.Key(req.NodeKey).HexString() {
|
||||
if m.Registered {
|
||||
log.Printf("[%s] Client is registered and we have the current NodeKey. All clear to /map", m.Name)
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *m.Namespace.toUser()
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
@@ -121,12 +131,18 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[%s] Not registered and not NodeKey rotation. Sending a authurl to register", m.Name)
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("Not registered and not NodeKey rotation. Sending a authurl to register")
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
h.cfg.ServerURL, mKey.HexString())
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
@@ -135,16 +151,22 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after an key expiration
|
||||
if m.NodeKey == wgcfg.Key(req.OldNodeKey).HexString() {
|
||||
log.Printf("[%s] We have the OldNodeKey in the database. This is a key refresh", m.Name)
|
||||
m.NodeKey = wgcfg.Key(req.NodeKey).HexString()
|
||||
db.Save(&m)
|
||||
if m.NodeKey == wgkey.Key(req.OldNodeKey).HexString() {
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
m.NodeKey = wgkey.Key(req.NodeKey).HexString()
|
||||
h.db.Save(&m)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *m.Namespace.toUser()
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
return
|
||||
}
|
||||
@@ -155,25 +177,38 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
|
||||
// We arrive here after a client is restarted without finalizing the authentication flow or
|
||||
// when headscale is stopped in the middle of the auth process.
|
||||
if m.Registered {
|
||||
log.Printf("[%s] The node is sending us a new NodeKey, but machine is registered. All clear for /map", m.Name)
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("The node is sending us a new NodeKey, but machine is registered. All clear for /map")
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *m.Namespace.toUser()
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
return
|
||||
}
|
||||
log.Printf("[%s] The node is sending us a new NodeKey, sending auth url", m.Name)
|
||||
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("The node is sending us a new NodeKey, sending auth url")
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
h.cfg.ServerURL, mKey.HexString())
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
@@ -190,36 +225,50 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
|
||||
//
|
||||
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
|
||||
func (h *Headscale) PollNetMapHandler(c *gin.Context) {
|
||||
log.Trace().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("id", c.Param("id")).
|
||||
Msg("PollNetMapHandler called")
|
||||
body, _ := io.ReadAll(c.Request.Body)
|
||||
mKeyStr := c.Param("id")
|
||||
mKey, err := wgcfg.ParseHexKey(mKeyStr)
|
||||
mKey, err := wgkey.ParseHex(mKeyStr)
|
||||
if err != nil {
|
||||
log.Printf("Cannot parse client key: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "PollNetMap").
|
||||
Err(err).
|
||||
Msg("Cannot parse client key")
|
||||
c.String(http.StatusBadRequest, "")
|
||||
return
|
||||
}
|
||||
req := tailcfg.MapRequest{}
|
||||
err = decode(body, &req, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot decode message: %s", err)
|
||||
log.Error().
|
||||
Str("handler", "PollNetMap").
|
||||
Err(err).
|
||||
Msg("Cannot decode message")
|
||||
c.String(http.StatusBadRequest, "")
|
||||
return
|
||||
}
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
var m Machine
|
||||
if db.First(&m, "machine_key = ?", mKey.HexString()).RecordNotFound() {
|
||||
log.Printf("Ignoring request, cannot find machine with key %s", mKey.HexString())
|
||||
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
log.Warn().
|
||||
Str("handler", "PollNetMap").
|
||||
Msgf("Ignoring request, cannot find machine with key %s", mKey.HexString())
|
||||
c.String(http.StatusUnauthorized, "")
|
||||
return
|
||||
}
|
||||
log.Trace().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("id", c.Param("id")).
|
||||
Str("machine", m.Name).
|
||||
Msg("Found machine in database")
|
||||
|
||||
hostinfo, _ := json.Marshal(req.Hostinfo)
|
||||
m.Name = req.Hostinfo.Hostname
|
||||
m.HostInfo = datatypes.JSON(hostinfo)
|
||||
m.DiscoKey = wgcfg.Key(req.DiscoKey).HexString()
|
||||
m.DiscoKey = wgkey.Key(req.DiscoKey).HexString()
|
||||
now := time.Now().UTC()
|
||||
|
||||
// From Tailscale client:
|
||||
@@ -235,19 +284,31 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
|
||||
m.Endpoints = datatypes.JSON(endpoints)
|
||||
m.LastSeen = &now
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
update := make(chan []byte, 1)
|
||||
|
||||
pollData := make(chan []byte, 1)
|
||||
update := make(chan []byte, 1)
|
||||
cancelKeepAlive := make(chan []byte, 1)
|
||||
defer close(pollData)
|
||||
|
||||
cancelKeepAlive := make(chan []byte, 1)
|
||||
defer close(cancelKeepAlive)
|
||||
h.pollMu.Lock()
|
||||
h.clientsPolling[m.ID] = update
|
||||
h.pollMu.Unlock()
|
||||
|
||||
log.Trace().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("id", c.Param("id")).
|
||||
Str("machine", m.Name).
|
||||
Msg("Storing update channel")
|
||||
h.clientsPolling.Store(m.ID, update)
|
||||
|
||||
data, err := h.getMapResponse(mKey, req, m)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("id", c.Param("id")).
|
||||
Str("machine", m.Name).
|
||||
Err(err).
|
||||
Msg("Failed to get Map response")
|
||||
c.String(http.StatusInternalServerError, ":(")
|
||||
return
|
||||
}
|
||||
@@ -257,127 +318,201 @@ func (h *Headscale) PollNetMapHandler(c *gin.Context) {
|
||||
// empty endpoints to peers)
|
||||
|
||||
// Details on the protocol can be found in https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L696
|
||||
log.Printf("[%s] ReadOnly=%t OmitPeers=%t Stream=%t", m.Name, req.ReadOnly, req.OmitPeers, req.Stream)
|
||||
log.Debug().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("id", c.Param("id")).
|
||||
Str("machine", m.Name).
|
||||
Bool("readOnly", req.ReadOnly).
|
||||
Bool("omitPeers", req.OmitPeers).
|
||||
Bool("stream", req.Stream).
|
||||
Msg("Client map request processed")
|
||||
|
||||
if req.ReadOnly {
|
||||
log.Printf("[%s] Client is starting up. Asking for DERP map", m.Name)
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Client is starting up. Asking for DERP map")
|
||||
c.Data(200, "application/json; charset=utf-8", *data)
|
||||
return
|
||||
}
|
||||
if req.OmitPeers && !req.Stream {
|
||||
log.Printf("[%s] Client sent endpoint update and is ok with a response without peer list", m.Name)
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Client sent endpoint update and is ok with a response without peer list")
|
||||
c.Data(200, "application/json; charset=utf-8", *data)
|
||||
return
|
||||
} else if req.OmitPeers && req.Stream {
|
||||
log.Printf("[%s] Warning, ignoring request, don't know how to handle it", m.Name)
|
||||
log.Warn().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Ignoring request, don't know how to handle it")
|
||||
c.String(http.StatusBadRequest, "")
|
||||
return
|
||||
}
|
||||
|
||||
log.Printf("[%s] Client is ready to access the tailnet", m.Name)
|
||||
log.Printf("[%s] Sending initial map", m.Name)
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Client is ready to access the tailnet")
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Sending initial map")
|
||||
pollData <- *data
|
||||
|
||||
log.Printf("[%s] Notifying peers", m.Name)
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Notifying peers")
|
||||
peers, _ := h.getPeers(m)
|
||||
h.pollMu.Lock()
|
||||
for _, p := range *peers {
|
||||
pUp, ok := h.clientsPolling[uint64(p.ID)]
|
||||
pUp, ok := h.clientsPolling.Load(uint64(p.ID))
|
||||
if ok {
|
||||
log.Printf("[%s] Notifying peer %s (%s)", m.Name, p.Name, p.Addresses[0])
|
||||
pUp <- []byte{}
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Str("peer", m.Name).
|
||||
Str("address", p.Addresses[0].String()).
|
||||
Msgf("Notifying peer %s (%s)", p.Name, p.Addresses[0])
|
||||
pUp.(chan []byte) <- []byte{}
|
||||
} else {
|
||||
log.Printf("[%s] Peer %s does not appear to be polling", m.Name, p.Name)
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Str("peer", m.Name).
|
||||
Msgf("Peer %s does not appear to be polling", p.Name)
|
||||
}
|
||||
}
|
||||
h.pollMu.Unlock()
|
||||
|
||||
go h.keepAlive(cancelKeepAlive, pollData, mKey, req, m)
|
||||
|
||||
c.Stream(func(w io.Writer) bool {
|
||||
select {
|
||||
case data := <-pollData:
|
||||
log.Printf("[%s] Sending data (%d bytes)", m.Name, len(data))
|
||||
log.Trace().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Int("bytes", len(data)).
|
||||
Msg("Sending data")
|
||||
_, err := w.Write(data)
|
||||
if err != nil {
|
||||
log.Printf("[%s] 🤮 Cannot write data: %s", m.Name, err)
|
||||
log.Error().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Err(err).
|
||||
Msg("Cannot write data")
|
||||
}
|
||||
now := time.Now().UTC()
|
||||
m.LastSeen = &now
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
return true
|
||||
|
||||
case <-update:
|
||||
log.Printf("[%s] Received a request for update", m.Name)
|
||||
log.Debug().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("Received a request for update")
|
||||
data, err := h.getMapResponse(mKey, req, m)
|
||||
if err != nil {
|
||||
log.Printf("[%s] Could not get the map update: %s", m.Name, err)
|
||||
log.Error().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Err(err).
|
||||
Msg("Could not get the map update")
|
||||
}
|
||||
_, err = w.Write(*data)
|
||||
if err != nil {
|
||||
log.Printf("[%s] Could not write the map response: %s", m.Name, err)
|
||||
log.Error().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Err(err).
|
||||
Msg("Could not write the map response")
|
||||
}
|
||||
return true
|
||||
|
||||
case <-c.Request.Context().Done():
|
||||
log.Printf("[%s] The client has closed the connection", m.Name)
|
||||
log.Info().
|
||||
Str("handler", "PollNetMap").
|
||||
Str("machine", m.Name).
|
||||
Msg("The client has closed the connection")
|
||||
now := time.Now().UTC()
|
||||
m.LastSeen = &now
|
||||
db.Save(&m)
|
||||
h.pollMu.Lock()
|
||||
h.db.Save(&m)
|
||||
cancelKeepAlive <- []byte{}
|
||||
delete(h.clientsPolling, m.ID)
|
||||
h.clientsPolling.Delete(m.ID)
|
||||
close(update)
|
||||
h.pollMu.Unlock()
|
||||
return false
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (h *Headscale) keepAlive(cancel chan []byte, pollData chan []byte, mKey wgcfg.Key, req tailcfg.MapRequest, m Machine) {
|
||||
func (h *Headscale) keepAlive(cancel chan []byte, pollData chan []byte, mKey wgkey.Key, req tailcfg.MapRequest, m Machine) {
|
||||
for {
|
||||
select {
|
||||
case <-cancel:
|
||||
return
|
||||
|
||||
default:
|
||||
h.pollMu.Lock()
|
||||
data, err := h.getMapKeepAliveResponse(mKey, req, m)
|
||||
if err != nil {
|
||||
log.Printf("Error generating the keep alive msg: %s", err)
|
||||
log.Error().
|
||||
Str("func", "keepAlive").
|
||||
Err(err).
|
||||
Msg("Error generating the keep alive msg")
|
||||
return
|
||||
}
|
||||
log.Printf("[%s] Sending keepalive", m.Name)
|
||||
|
||||
log.Debug().
|
||||
Str("func", "keepAlive").
|
||||
Str("machine", m.Name).
|
||||
Msg("Sending keepalive")
|
||||
pollData <- *data
|
||||
h.pollMu.Unlock()
|
||||
|
||||
time.Sleep(60 * time.Second)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
|
||||
func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := m.toNode()
|
||||
if err != nil {
|
||||
log.Printf("Cannot convert to node: %s", err)
|
||||
log.Error().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot convert to node")
|
||||
return nil, err
|
||||
}
|
||||
peers, err := h.getPeers(m)
|
||||
if err != nil {
|
||||
log.Printf("Cannot fetch peers: %s", err)
|
||||
log.Error().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot fetch peers")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profile := tailcfg.UserProfile{
|
||||
ID: tailcfg.UserID(m.NamespaceID),
|
||||
LoginName: m.Namespace.Name,
|
||||
DisplayName: m.Namespace.Name,
|
||||
}
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: *peers,
|
||||
DNS: []netaddr.IP{},
|
||||
SearchPaths: []string{},
|
||||
Domain: "foobar@example.com",
|
||||
PacketFilter: tailcfg.FilterAllowAll,
|
||||
Domain: "headscale.net",
|
||||
PacketFilter: *h.aclRules,
|
||||
DERPMap: h.cfg.DerpMap,
|
||||
UserProfiles: []tailcfg.UserProfile{},
|
||||
Roles: []tailcfg.Role{},
|
||||
UserProfiles: []tailcfg.UserProfile{profile},
|
||||
}
|
||||
|
||||
var respBody []byte
|
||||
@@ -403,7 +538,7 @@ func (h *Headscale) getMapResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Mac
|
||||
return &data, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapKeepAliveResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
|
||||
func (h *Headscale) getMapKeepAliveResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: true,
|
||||
}
|
||||
@@ -429,31 +564,55 @@ func (h *Headscale) getMapKeepAliveResponse(mKey wgcfg.Key, req tailcfg.MapReque
|
||||
return &data, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgcfg.Key, req tailcfg.RegisterRequest, m Machine) {
|
||||
func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgkey.Key, req tailcfg.RegisterRequest, m Machine) {
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", req.Hostinfo.Hostname)
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
pak, err := h.checkKeyValidity(req.Auth.AuthKey)
|
||||
if err != nil {
|
||||
resp.MachineAuthorized = false
|
||||
respBody, err := encode(resp, &idKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
log.Printf("[%s] Failed authentication via AuthKey", m.Name)
|
||||
log.Error().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Msg("Authentication key was valid, proceeding to acquire an IP address")
|
||||
ip, err := h.getAvailableIP()
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
log.Error().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Msg("Failed to find an available IP")
|
||||
return
|
||||
}
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("ip", ip.String()).
|
||||
Msgf("Assining %s to %s", ip, m.Name)
|
||||
|
||||
m.AuthKeyID = uint(pak.ID)
|
||||
m.IPAddress = ip.String()
|
||||
m.NamespaceID = pak.NamespaceID
|
||||
m.NodeKey = wgcfg.Key(req.NodeKey).HexString() // we update it just in case
|
||||
m.NodeKey = wgkey.Key(req.NodeKey).HexString() // we update it just in case
|
||||
m.Registered = true
|
||||
m.RegisterMethod = "authKey"
|
||||
db.Save(&m)
|
||||
@@ -462,10 +621,18 @@ func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgcfg.Key,
|
||||
resp.User = *pak.Namespace.toUser()
|
||||
respBody, err := encode(resp, &idKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Printf("Cannot encode message: %s", err)
|
||||
log.Error().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
log.Printf("[%s] Successfully authenticated via AuthKey", m.Name)
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("ip", ip.String()).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
}
|
||||
|
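One structural change running through the api.go hunks above and the app.go hunks below is replacing the `pollMu`-guarded `clientsPolling map[uint64]chan []byte` with a `sync.Map` keyed by machine ID. A standalone sketch of that Store/Load/Delete pattern (names are illustrative, not Headscale's API surface):

```go
package main

import (
	"fmt"
	"sync"
)

// clientsPolling maps a machine ID to the channel used to push map updates,
// mirroring the sync.Map introduced in app.go and used in PollNetMapHandler.
var clientsPolling sync.Map

func notifyPeer(id uint64) {
	// Load returns an untyped value, so the channel must be type-asserted,
	// just like `pUp.(chan []byte) <- []byte{}` in the api.go diff above.
	if ch, ok := clientsPolling.Load(id); ok {
		ch.(chan []byte) <- []byte{}
	} else {
		fmt.Printf("peer %d does not appear to be polling\n", id)
	}
}

func main() {
	update := make(chan []byte, 1)
	clientsPolling.Store(uint64(42), update) // a client starts long-polling

	notifyPeer(42)
	fmt.Println("got update:", <-update)

	clientsPolling.Delete(uint64(42)) // the client closed the connection
	notifyPeer(42)
}
```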
78	app.go
@@ -3,17 +3,20 @@ package headscale
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"gorm.io/gorm"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/wgengine/wgcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration
|
||||
@@ -23,6 +26,7 @@ type Config struct {
|
||||
PrivateKeyPath string
|
||||
DerpMap *tailcfg.DERPMap
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
IPPrefix netaddr.IPPrefix
|
||||
|
||||
DBtype string
|
||||
DBpath string
|
||||
@@ -32,6 +36,7 @@ type Config struct {
|
||||
DBuser string
|
||||
DBpass string
|
||||
|
||||
TLSLetsEncryptListen string
|
||||
TLSLetsEncryptHostname string
|
||||
TLSLetsEncryptCacheDir string
|
||||
TLSLetsEncryptChallengeType string
|
||||
@@ -43,14 +48,17 @@ type Config struct {
|
||||
// Headscale represents the base app of the service
|
||||
type Headscale struct {
|
||||
cfg Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
publicKey *wgcfg.Key
|
||||
privateKey *wgcfg.PrivateKey
|
||||
publicKey *wgkey.Key
|
||||
privateKey *wgkey.Private
|
||||
|
||||
pollMu sync.Mutex
|
||||
clientsPolling map[uint64]chan []byte // this is by all means a hackity hack
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules *[]tailcfg.FilterRule
|
||||
|
||||
clientsPolling sync.Map
|
||||
}
|
||||
|
||||
// NewHeadscale returns the Headscale app
|
||||
@@ -59,7 +67,7 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
privKey, err := wgcfg.ParsePrivateKey(string(content))
|
||||
privKey, err := wgkey.ParsePrivate(string(content))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -73,7 +81,7 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
case "sqlite3":
|
||||
dbString = cfg.DBpath
|
||||
default:
|
||||
return nil, errors.New("Unsupported DB")
|
||||
return nil, errors.New("unsupported DB")
|
||||
}
|
||||
|
||||
h := Headscale{
|
||||
@@ -82,12 +90,14 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
publicKey: &pubKey,
|
||||
aclRules: &tailcfg.FilterAllowAll, // default allowall
|
||||
}
|
||||
|
||||
err = h.initDB()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h.clientsPolling = make(map[uint64]chan []byte)
|
||||
|
||||
return &h, nil
|
||||
}
|
||||
|
||||
@@ -107,47 +117,58 @@ func (h *Headscale) ExpireEphemeralNodes(milliSeconds int64) {
|
||||
}
|
||||
|
||||
func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Printf("Error listing namespaces: %s", err)
|
||||
log.Error().Err(err).Msg("Error listing namespaces")
|
||||
return
|
||||
}
|
||||
for _, ns := range *namespaces {
|
||||
machines, err := h.ListMachinesInNamespace(ns.Name)
|
||||
if err != nil {
|
||||
log.Printf("Error listing machines in namespace %s: %s", ns.Name, err)
|
||||
log.Error().Err(err).Str("namespace", ns.Name).Msg("Error listing machines in namespace")
|
||||
return
|
||||
}
|
||||
for _, m := range *machines {
|
||||
if m.AuthKey != nil && m.LastSeen != nil && m.AuthKey.Ephemeral && time.Now().After(m.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
log.Printf("[%s] Ephemeral client removed from database\n", m.Name)
|
||||
err = db.Unscoped().Delete(m).Error
|
||||
log.Info().Str("machine", m.Name).Msg("Ephemeral client removed from database")
|
||||
err = h.db.Unscoped().Delete(m).Error
|
||||
if err != nil {
|
||||
log.Printf("[%s] 🤮 Cannot delete ephemeral machine from the database: %s", m.Name, err)
|
||||
log.Error().Err(err).Str("machine", m.Name).Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
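The inactivity window checked above is h.cfg.EphemeralNodeInactivityTimeout, read from the ephemeral_node_inactivity_timeout config key; the sample config files later in this diff use, for example:

    "ephemeral_node_inactivity_timeout": "30m"

Values should stay above the keepalive timeout (60 seconds) plus a small margin, as noted in getHeadscaleApp.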
|
||||
|
||||
// WatchForKVUpdates checks the KV DB table for requests to perform tailnet upgrades
|
||||
// This is a way for the CLI to communicate with the headscale server
|
||||
func (h *Headscale) watchForKVUpdates(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
h.watchForKVUpdatesWorker()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) watchForKVUpdatesWorker() {
|
||||
h.checkForNamespacesPendingUpdates()
|
||||
// more functions will come here in the future
|
||||
}
|
||||
|
||||
// Serve launches a GIN server with the Headscale API
|
||||
func (h *Headscale) Serve() error {
|
||||
r := gin.Default()
|
||||
r.GET("/health", func(c *gin.Context) { c.JSON(200, gin.H{"healthy": "ok"}) })
|
||||
r.GET("/key", h.KeyHandler)
|
||||
r.GET("/register", h.RegisterWebAPI)
|
||||
r.POST("/machine/:id/map", h.PollNetMapHandler)
|
||||
r.POST("/machine/:id", h.RegistrationHandler)
|
||||
var err error
|
||||
|
||||
go h.watchForKVUpdates(5000)
|
||||
|
||||
if h.cfg.TLSLetsEncryptHostname != "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Println("WARNING: listening with TLS but ServerURL does not start with https://")
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
|
||||
m := autocert.Manager{
|
||||
@@ -163,27 +184,30 @@ func (h *Headscale) Serve() error {
|
||||
if h.cfg.TLSLetsEncryptChallengeType == "TLS-ALPN-01" {
|
||||
// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
|
||||
// The RFC requires that the validation is done on port 443; in other words, headscale
|
||||
// must be configured to run on port 443.
|
||||
// must be reachable on port 443.
|
||||
err = s.ListenAndServeTLS("", "")
|
||||
} else if h.cfg.TLSLetsEncryptChallengeType == "HTTP-01" {
|
||||
// Configuration via autocert with HTTP-01. This requires listening on
|
||||
// port 80 for the certificate validation in addition to the headscale
|
||||
// service, which can be configured to run on any other port.
|
||||
go func() {
|
||||
log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(http.HandlerFunc(h.redirect))))
|
||||
|
||||
log.Fatal().
|
||||
Err(http.ListenAndServe(h.cfg.TLSLetsEncryptListen, m.HTTPHandler(http.HandlerFunc(h.redirect)))).
|
||||
Msg("failed to set up a HTTP server")
|
||||
}()
|
||||
err = s.ListenAndServeTLS("", "")
|
||||
} else {
|
||||
return errors.New("Unknown value for TLSLetsEncryptChallengeType")
|
||||
return errors.New("unknown value for TLSLetsEncryptChallengeType")
|
||||
}
|
||||
} else if h.cfg.TLSCertPath == "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
|
||||
log.Println("WARNING: listening without TLS but ServerURL does not start with http://")
|
||||
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
|
||||
}
|
||||
err = r.Run(h.cfg.Addr)
|
||||
} else {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Println("WARNING: listening with TLS but ServerURL does not start with https://")
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
err = r.RunTLS(h.cfg.Addr, h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
|
||||
}
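The three branches above are selected purely from the TLS configuration: Let's Encrypt (TLS-ALPN-01 or HTTP-01), plain HTTP, or a static certificate pair. A sketch of a Let's Encrypt HTTP-01 setup, using the same keys as the sample config files below (the hostname is a placeholder):

    "tls_letsencrypt_hostname": "headscale.example.com",
    "tls_letsencrypt_listen": ":http",
    "tls_letsencrypt_challenge_type": "HTTP-01",
    "tls_cert_path": "",
    "tls_key_path": ""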
|
||||
|
app_test.go
@@ -5,9 +5,8 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
_ "github.com/jinzhu/gorm/dialects/sqlite" // sql driver
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) {
|
||||
@@ -38,7 +37,9 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
cfg := Config{}
|
||||
cfg := Config{
|
||||
IPPrefix: netaddr.MustParseIPPrefix("10.27.0.0/23"),
|
||||
}
|
||||
|
||||
h = Headscale{
|
||||
cfg: cfg,
|
||||
@@ -49,4 +50,9 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
h.db = db
|
||||
}
|
||||
|
cli.go
@@ -2,9 +2,9 @@ package headscale
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
"tailscale.com/wgengine/wgcfg"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/types/wgkey"
|
||||
)
|
||||
|
||||
// RegisterMachine is executed from the CLI to register a new Machine using its MachineKey
|
||||
@@ -13,18 +13,13 @@ func (h *Headscale) RegisterMachine(key string, namespace string) (*Machine, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mKey, err := wgcfg.ParseHexKey(key)
|
||||
mKey, err := wgkey.ParseHex(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
m := Machine{}
|
||||
if db.First(&m, "machine_key = ?", mKey.HexString()).RecordNotFound() {
|
||||
if result := h.db.First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return nil, errors.New("Machine not found")
|
||||
}
|
||||
|
||||
@@ -40,6 +35,6 @@ func (h *Headscale) RegisterMachine(key string, namespace string) (*Machine, err
|
||||
m.NamespaceID = ns.ID
|
||||
m.Registered = true
|
||||
m.RegisterMethod = "cli"
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
return &m, nil
|
||||
}
|
||||
|
@@ -8,12 +8,6 @@ func (s *Suite) TestRegisterMachine(c *check.C) {
|
||||
n, err := h.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e",
|
||||
@@ -21,8 +15,9 @@ func (s *Suite) TestRegisterMachine(c *check.C) {
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
IPAddress: "10.0.0.1",
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
_, err = h.GetMachine("test", "testmachine")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
@@ -8,12 +8,19 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var NamespaceCmd = &cobra.Command{
|
||||
Use: "namespace",
|
||||
func init() {
|
||||
rootCmd.AddCommand(namespaceCmd)
|
||||
namespaceCmd.AddCommand(createNamespaceCmd)
|
||||
namespaceCmd.AddCommand(listNamespacesCmd)
|
||||
namespaceCmd.AddCommand(destroyNamespaceCmd)
|
||||
}
|
||||
|
||||
var namespaceCmd = &cobra.Command{
|
||||
Use: "namespaces",
|
||||
Short: "Manage the namespaces of Headscale",
|
||||
}
|
||||
|
||||
var CreateNamespaceCmd = &cobra.Command{
|
||||
var createNamespaceCmd = &cobra.Command{
|
||||
Use: "create NAME",
|
||||
Short: "Creates a new namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -41,7 +48,7 @@ var CreateNamespaceCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var DestroyNamespaceCmd = &cobra.Command{
|
||||
var destroyNamespaceCmd = &cobra.Command{
|
||||
Use: "destroy NAME",
|
||||
Short: "Destroys a namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -69,7 +76,7 @@ var DestroyNamespaceCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var ListNamespacesCmd = &cobra.Command{
|
||||
var listNamespacesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the namespaces",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
@@ -3,12 +3,32 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var RegisterCmd = &cobra.Command{
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
nodeCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := nodeCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
}
|
||||
|
||||
var nodeCmd = &cobra.Command{
|
||||
Use: "nodes",
|
||||
Short: "Manage the nodes of Headscale",
|
||||
}
|
||||
|
||||
var registerNodeCmd = &cobra.Command{
|
||||
Use: "register machineID",
|
||||
Short: "Registers a machine to your network",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -41,7 +61,7 @@ var RegisterCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var ListNodesCmd = &cobra.Command{
|
||||
var listNodesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the nodes in a given namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
@@ -65,19 +85,62 @@ var ListNodesCmd = &cobra.Command{
|
||||
log.Fatalf("Error getting nodes: %s", err)
|
||||
}
|
||||
|
||||
fmt.Printf("name\t\tlast seen\t\tephemeral\n")
|
||||
fmt.Printf("ID\tname\t\tlast seen\t\tephemeral\n")
|
||||
for _, m := range *machines {
|
||||
var ephemeral bool
|
||||
if m.AuthKey != nil && m.AuthKey.Ephemeral {
|
||||
ephemeral = true
|
||||
}
|
||||
fmt.Printf("%s\t%s\t%t\n", m.Name, m.LastSeen.Format("2006-01-02 15:04:05"), ephemeral)
|
||||
var lastSeen time.Time
|
||||
if m.LastSeen != nil {
|
||||
lastSeen = *m.LastSeen
|
||||
}
|
||||
fmt.Printf("%d\t%s\t%s\t%t\n", m.ID, m.Name, lastSeen.Format("2006-01-02 15:04:05"), ephemeral)
|
||||
}
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
var NodeCmd = &cobra.Command{
|
||||
Use: "node",
|
||||
Short: "Manage the nodes of Headscale",
|
||||
var deleteNodeCmd = &cobra.Command{
|
||||
Use: "delete ID",
|
||||
Short: "Delete a node",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
id, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Error converting ID to integer: %s", err)
|
||||
}
|
||||
m, err := h.GetMachineByID(uint64(id))
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting node: %s", err)
|
||||
}
|
||||
|
||||
confirm := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf("Do you want to remove the node %s?", m.Name),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if confirm {
|
||||
err = h.DeleteMachine(m)
|
||||
if err != nil {
|
||||
log.Fatalf("Error deleting node: %s", err)
|
||||
}
|
||||
fmt.Printf("Node deleted\n")
|
||||
} else {
|
||||
fmt.Printf("Node not deleted\n")
|
||||
}
|
||||
},
|
||||
}
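All of the node subcommands require the persistent -n/--namespace flag registered in init above; a plausible session against the renamed commands (namespace and arguments are placeholders):

    headscale -n default nodes list
    headscale -n default nodes register <machineID>
    headscale -n default nodes delete <ID>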
|
||||
|
@@ -10,12 +10,27 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var PreauthkeysCmd = &cobra.Command{
|
||||
Use: "preauthkey",
|
||||
func init() {
|
||||
rootCmd.AddCommand(preauthkeysCmd)
|
||||
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := preauthkeysCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
preauthkeysCmd.AddCommand(listPreAuthKeys)
|
||||
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
|
||||
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
|
||||
createPreAuthKeyCmd.PersistentFlags().Bool("reusable", false, "Make the preauthkey reusable")
|
||||
createPreAuthKeyCmd.PersistentFlags().Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().StringP("expiration", "e", "", "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
}
|
||||
|
||||
var preauthkeysCmd = &cobra.Command{
|
||||
Use: "preauthkeys",
|
||||
Short: "Handle the preauthkeys in Headscale",
|
||||
}
|
||||
|
||||
var ListPreAuthKeys = &cobra.Command{
|
||||
var listPreAuthKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the preauthkeys for this namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
@@ -44,11 +59,19 @@ var ListPreAuthKeys = &cobra.Command{
|
||||
if k.Expiration != nil {
|
||||
expiration = k.Expiration.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var reusable string
|
||||
if k.Ephemeral {
|
||||
reusable = "N/A"
|
||||
} else {
|
||||
reusable = fmt.Sprintf("%v", k.Reusable)
|
||||
}
|
||||
|
||||
fmt.Printf(
|
||||
"key: %s, namespace: %s, reusable: %v, ephemeral: %v, expiration: %s, created_at: %s\n",
|
||||
"key: %s, namespace: %s, reusable: %s, ephemeral: %v, expiration: %s, created_at: %s\n",
|
||||
k.Key,
|
||||
k.Namespace.Name,
|
||||
k.Reusable,
|
||||
reusable,
|
||||
k.Ephemeral,
|
||||
expiration,
|
||||
k.CreatedAt.Format("2006-01-02 15:04:05"),
|
||||
@@ -57,7 +80,7 @@ var ListPreAuthKeys = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var CreatePreAuthKeyCmd = &cobra.Command{
|
||||
var createPreAuthKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a new preauthkey in the specified namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
@@ -94,6 +117,45 @@ var CreatePreAuthKeyCmd = &cobra.Command{
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("Key: %s\n", k.Key)
|
||||
fmt.Printf("%s\n", k.Key)
|
||||
},
|
||||
}
|
||||
|
||||
var expirePreAuthKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire a preauthkey",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
|
||||
k, err := h.GetPreAuthKey(n, args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting the key: %s", err)
|
||||
}
|
||||
|
||||
err = h.MarkExpirePreAuthKey(k)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(k, err, o)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Println("Expired")
|
||||
},
|
||||
}
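Key management is namespace-scoped and the create flags map directly to the PersistentFlags registered in init above; the integration test later in this diff drives it the same way (the namespace here is just an example):

    headscale -n test preauthkeys create --reusable --expiration 24h
    headscale -n test preauthkeys list
    headscale -n test preauthkeys expire <KEY>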
|
||||
|
cmd/headscale/cli/root.go
@@ -0,0 +1,28 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json' or 'json-line'")
|
||||
}
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
Long: `
|
||||
headscale is an open source implementation of the Tailscale control server
|
||||
|
||||
https://github.com/juanfont/headscale`,
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
@@ -8,12 +8,23 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var RoutesCmd = &cobra.Command{
|
||||
func init() {
|
||||
rootCmd.AddCommand(routesCmd)
|
||||
routesCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := routesCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
routesCmd.AddCommand(enableRouteCmd)
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
Use: "routes",
|
||||
Short: "Manage the routes of Headscale",
|
||||
}
|
||||
|
||||
var ListRoutesCmd = &cobra.Command{
|
||||
var listRoutesCmd = &cobra.Command{
|
||||
Use: "list NODE",
|
||||
Short: "List the routes exposed by this node",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -49,7 +60,7 @@ var ListRoutesCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var EnableRouteCmd = &cobra.Command{
|
||||
var enableRouteCmd = &cobra.Command{
|
||||
Use: "enable node-name route",
|
||||
Short: "Allows exposing a route declared by this node to the rest of the nodes",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
|
@@ -6,7 +6,11 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var ServeCmd = &cobra.Command{
|
||||
func init() {
|
||||
rootCmd.AddCommand(serveCmd)
|
||||
}
|
||||
|
||||
var serveCmd = &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Launches the headscale server",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
|
@@ -5,15 +5,16 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/yaml.v2"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -36,6 +37,10 @@ func LoadConfig(path string) error {
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", "HTTP-01")
|
||||
|
||||
viper.SetDefault("ip_prefix", "100.64.0.0/10")
|
||||
|
||||
viper.SetDefault("log_level", "debug")
|
||||
|
||||
err := viper.ReadInConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Fatal error reading config file: %s \n", err)
|
||||
@@ -48,7 +53,9 @@ func LoadConfig(path string) error {
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") && (viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") && (!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
errorText += "Fatal config error: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, listen_addr must end in :443\n"
|
||||
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
|
||||
log.Warn().
|
||||
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") && (viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
|
||||
@@ -78,9 +85,13 @@ func absPath(path string) string {
|
||||
}
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
derpMap, err := loadDerpMap(absPath(viper.GetString("derp_map_path")))
|
||||
derpPath := absPath(viper.GetString("derp_map_path"))
|
||||
derpMap, err := loadDerpMap(derpPath)
|
||||
if err != nil {
|
||||
log.Printf("Could not load DERP servers map file: %s", err)
|
||||
log.Error().
|
||||
Str("path", derpPath).
|
||||
Err(err).
|
||||
Msg("Could not load DERP servers map file")
|
||||
}
|
||||
|
||||
// Minimum inactivity timeout is the keepalive timeout (60s) plus a few seconds
|
||||
@@ -96,6 +107,7 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
PrivateKeyPath: absPath(viper.GetString("private_key_path")),
|
||||
DerpMap: derpMap,
|
||||
IPPrefix: netaddr.MustParseIPPrefix(viper.GetString("ip_prefix")),
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration("ephemeral_node_inactivity_timeout"),
|
||||
|
||||
@@ -108,6 +120,7 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
|
||||
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
|
||||
TLSLetsEncryptCacheDir: absPath(viper.GetString("tls_letsencrypt_cache_dir")),
|
||||
TLSLetsEncryptChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
|
||||
|
||||
@@ -119,6 +132,20 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We are doing this here, as in the future it could be nice to also hot-reload it
|
||||
|
||||
if viper.GetString("acl_policy_path") != "" {
|
||||
aclPath := absPath(viper.GetString("acl_policy_path"))
|
||||
err = h.LoadACLPolicy(aclPath)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("path", aclPath).
|
||||
Err(err).
|
||||
Msg("Could not load the ACL policy")
|
||||
}
|
||||
}
|
||||
|
||||
return h, nil
|
||||
}
|
||||
|
||||
@@ -145,24 +172,24 @@ func JsonOutput(result interface{}, errResult error, outputFormat string) {
|
||||
if errResult != nil {
|
||||
j, err = json.MarshalIndent(ErrorOutput{errResult.Error()}, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
} else {
|
||||
j, err = json.MarshalIndent(result, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
}
|
||||
case "json-line":
|
||||
if errResult != nil {
|
||||
j, err = json.Marshal(ErrorOutput{errResult.Error()})
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
} else {
|
||||
j, err = json.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
cmd/headscale/cli/version.go
@@ -0,0 +1,27 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/spf13/cobra"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var version = "dev"
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version.",
|
||||
Long: "The version of headscale.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(map[string]string{"version": version}, nil, o)
|
||||
return
|
||||
}
|
||||
fmt.Println(version)
|
||||
},
|
||||
}
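Because the -o/--output flag is persistent on rootCmd, the version command inherits it like every other subcommand; with the default build-time value above the behaviour is roughly:

    headscale version              # prints: dev
    headscale version -o json-line # prints: {"version":"dev"}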
|
@@ -1,93 +1,65 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/efekarakus/termcolor"
|
||||
"github.com/juanfont/headscale/cmd/headscale/cli"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var version = "dev"
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version.",
|
||||
Long: "The version of headscale.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
if strings.HasPrefix(o, "json") {
|
||||
cli.JsonOutput(map[string]string{"version": version}, nil, o)
|
||||
return
|
||||
}
|
||||
fmt.Println(version)
|
||||
},
|
||||
}
|
||||
|
||||
var headscaleCmd = &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
Long: `
|
||||
headscale is an open source implementation of the Tailscale control server
|
||||
|
||||
Juan Font Alonso <juanfontalonso@gmail.com> - 2021
|
||||
https://gitlab.com/juanfont/headscale`,
|
||||
}
|
||||
|
||||
func main() {
|
||||
var colors bool
|
||||
switch l := termcolor.SupportLevel(os.Stderr); l {
|
||||
case termcolor.Level16M:
|
||||
colors = true
|
||||
case termcolor.Level256:
|
||||
colors = true
|
||||
case termcolor.LevelBasic:
|
||||
colors = true
|
||||
default:
|
||||
// no color, return text as is.
|
||||
log.Trace().Msg("Colors are not supported, disabling")
|
||||
colors = false
|
||||
}
|
||||
|
||||
// Adhere to no-color.org manifesto of allowing users to
|
||||
// turn off color in cli/services
|
||||
if _, noColorIsSet := os.LookupEnv("NO_COLOR"); noColorIsSet {
|
||||
log.Trace().Msg("NO_COLOR is set, disabling colors")
|
||||
colors = false
|
||||
}
|
||||
|
||||
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
||||
log.Logger = log.Output(zerolog.ConsoleWriter{
|
||||
Out: os.Stdout,
|
||||
TimeFormat: time.RFC3339,
|
||||
NoColor: !colors,
|
||||
})
|
||||
|
||||
err := cli.LoadConfig("")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
|
||||
headscaleCmd.AddCommand(cli.NamespaceCmd)
|
||||
headscaleCmd.AddCommand(cli.NodeCmd)
|
||||
headscaleCmd.AddCommand(cli.PreauthkeysCmd)
|
||||
headscaleCmd.AddCommand(cli.RoutesCmd)
|
||||
headscaleCmd.AddCommand(cli.ServeCmd)
|
||||
headscaleCmd.AddCommand(versionCmd)
|
||||
|
||||
cli.NodeCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err = cli.NodeCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
logLevel := viper.GetString("log_level")
|
||||
switch logLevel {
|
||||
case "trace":
|
||||
zerolog.SetGlobalLevel(zerolog.TraceLevel)
|
||||
case "debug":
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
case "info":
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
case "warn":
|
||||
zerolog.SetGlobalLevel(zerolog.WarnLevel)
|
||||
case "error":
|
||||
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
|
||||
default:
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
}
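The level selected above comes from the log_level key, which LoadConfig defaults to "debug"; unrecognised values also fall back to debug. A quieter deployment would set, for example:

    "log_level": "info"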
|
||||
|
||||
cli.PreauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err = cli.PreauthkeysCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
cli.RoutesCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err = cli.RoutesCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
cli.NamespaceCmd.AddCommand(cli.CreateNamespaceCmd)
|
||||
cli.NamespaceCmd.AddCommand(cli.ListNamespacesCmd)
|
||||
cli.NamespaceCmd.AddCommand(cli.DestroyNamespaceCmd)
|
||||
|
||||
cli.NodeCmd.AddCommand(cli.ListNodesCmd)
|
||||
cli.NodeCmd.AddCommand(cli.RegisterCmd)
|
||||
|
||||
cli.RoutesCmd.AddCommand(cli.ListRoutesCmd)
|
||||
cli.RoutesCmd.AddCommand(cli.EnableRouteCmd)
|
||||
|
||||
cli.PreauthkeysCmd.AddCommand(cli.ListPreAuthKeys)
|
||||
cli.PreauthkeysCmd.AddCommand(cli.CreatePreAuthKeyCmd)
|
||||
|
||||
cli.CreatePreAuthKeyCmd.PersistentFlags().Bool("reusable", false, "Make the preauthkey reusable")
|
||||
cli.CreatePreAuthKeyCmd.PersistentFlags().Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
cli.CreatePreAuthKeyCmd.Flags().StringP("expiration", "e", "", "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
|
||||
headscaleCmd.PersistentFlags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json' or 'json-line'")
|
||||
|
||||
if err := headscaleCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
cli.Execute()
|
||||
}
|
||||
|
@@ -51,12 +51,13 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8000")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8000")
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "postgres")
|
||||
c.Assert(viper.GetString("db_port"), check.Equals, "5432")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
}
|
||||
|
||||
@@ -83,12 +84,13 @@ func (*Suite) TestSqliteConfigLoading(c *check.C) {
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8000")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8000")
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
}
|
||||
|
||||
@@ -123,9 +125,8 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
fmt.Println(tmp)
|
||||
|
||||
// Check configuration validation errors (2)
|
||||
configYaml = []byte("---\nserver_url: \"http://127.0.0.1:8000\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"")
|
||||
configYaml = []byte("---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"")
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(err, check.ErrorMatches, "Fatal config error: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, listen_addr must end in :443.*")
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"server_url": "http://127.0.0.1:8000",
|
||||
"listen_addr": "0.0.0.0:8000",
|
||||
"server_url": "http://127.0.0.1:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"private_key_path": "private.key",
|
||||
"derp_map_path": "derp.yaml",
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
@@ -11,8 +11,10 @@
|
||||
"db_user": "foo",
|
||||
"db_pass": "bar",
|
||||
"tls_letsencrypt_hostname": "",
|
||||
"tls_letsencrypt_listen": ":http",
|
||||
"tls_letsencrypt_cache_dir": ".cache",
|
||||
"tls_letsencrypt_challenge_type": "HTTP-01",
|
||||
"tls_cert_path": "",
|
||||
"tls_key_path": ""
|
||||
"tls_key_path": "",
|
||||
"acl_policy_path": ""
|
||||
}
|
||||
|
@@ -1,14 +1,16 @@
|
||||
{
|
||||
"server_url": "http://127.0.0.1:8000",
|
||||
"listen_addr": "0.0.0.0:8000",
|
||||
"server_url": "http://127.0.0.1:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"private_key_path": "private.key",
|
||||
"derp_map_path": "derp.yaml",
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
"db_type": "sqlite3",
|
||||
"db_path": "db.sqlite",
|
||||
"tls_letsencrypt_hostname": "",
|
||||
"tls_letsencrypt_listen": ":http",
|
||||
"tls_letsencrypt_cache_dir": ".cache",
|
||||
"tls_letsencrypt_challenge_type": "HTTP-01",
|
||||
"tls_cert_path": "",
|
||||
"tls_key_path": ""
|
||||
"tls_key_path": "",
|
||||
"acl_policy_path": ""
|
||||
}
|
||||
|
db.go
@@ -3,9 +3,10 @@ package headscale
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
_ "github.com/jinzhu/gorm/dialects/postgres" // sql driver
|
||||
_ "github.com/jinzhu/gorm/dialects/sqlite" // sql driver
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
const dbVersion = "1"
|
||||
@@ -17,63 +18,89 @@ type KV struct {
|
||||
}
|
||||
|
||||
func (h *Headscale) initDB() error {
|
||||
db, err := gorm.Open(h.dbType, h.dbString)
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.db = db
|
||||
|
||||
if h.dbType == "postgres" {
|
||||
db.Exec("create extension if not exists \"uuid-ossp\";")
|
||||
}
|
||||
db.AutoMigrate(&Machine{})
|
||||
db.AutoMigrate(&KV{})
|
||||
db.AutoMigrate(&Namespace{})
|
||||
db.AutoMigrate(&PreAuthKey{})
|
||||
db.Close()
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = db.AutoMigrate(&KV{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = db.AutoMigrate(&Namespace{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = db.AutoMigrate(&PreAuthKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = h.setValue("db_version", dbVersion)
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *Headscale) db() (*gorm.DB, error) {
|
||||
db, err := gorm.Open(h.dbType, h.dbString)
|
||||
func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
var db *gorm.DB
|
||||
var err error
|
||||
|
||||
var log logger.Interface
|
||||
if h.dbDebug {
|
||||
log = logger.Default
|
||||
} else {
|
||||
log = logger.Default.LogMode(logger.Silent)
|
||||
}
|
||||
|
||||
switch h.dbType {
|
||||
case "sqlite3":
|
||||
db, err = gorm.Open(sqlite.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
})
|
||||
case "postgres":
|
||||
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if h.dbDebug {
|
||||
db.LogMode(true)
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// getValue returns the value for the given key in KV
|
||||
func (h *Headscale) getValue(key string) (string, error) {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer db.Close()
|
||||
var row KV
|
||||
if db.First(&row, "key = ?", key).RecordNotFound() {
|
||||
if result := h.db.First(&row, "key = ?", key); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return "", errors.New("not found")
|
||||
}
|
||||
return row.Value, nil
|
||||
}
|
||||
|
||||
// setValue sets value for the given key in KV
|
||||
func (h *Headscale) setValue(key string, value string) error {
|
||||
kv := KV{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
_, err = h.getValue(key)
|
||||
|
||||
_, err := h.getValue(key)
|
||||
if err == nil {
|
||||
db.Model(&kv).Where("key = ?", key).Update("value", value)
|
||||
h.db.Model(&kv).Where("key = ?", key).Update("value", value)
|
||||
return nil
|
||||
}
|
||||
|
||||
db.Create(kv)
|
||||
h.db.Create(kv)
|
||||
return nil
|
||||
}
|
||||
|
derp.yaml
@@ -1,5 +1,5 @@
|
||||
# This file contains some of the official Tailscale DERP servers,
|
||||
# shamelessly taken from https://github.com/tailscale/tailscale/blob/main/derp/derpmap/derpmap.go
|
||||
# shamelessly taken from https://github.com/tailscale/tailscale/blob/main/net/dnsfallback/dns-fallback-servers.json
|
||||
#
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra
|
||||
regions:
|
||||
@@ -16,6 +16,14 @@ regions:
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 1b
|
||||
regionid: 1
|
||||
hostname: derp1b.tailscale.com
|
||||
ipv4: 45.55.35.93
|
||||
ipv6: "2604:a880:800:a1::f:2001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
2:
|
||||
regionid: 2
|
||||
regioncode: sfo
|
||||
@@ -29,6 +37,14 @@ regions:
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 2b
|
||||
regionid: 2
|
||||
hostname: derp2b.tailscale.com
|
||||
ipv4: 64.227.106.23
|
||||
ipv6: "2604:a880:4:1d0::29:9000"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
3:
|
||||
regionid: 3
|
||||
regioncode: sin
|
||||
@@ -54,4 +70,77 @@ regions:
|
||||
ipv6: "2a03:b0c0:3:e0::36e:900"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
derptestport: 0
|
||||
- name: 4b
|
||||
regionid: 4
|
||||
hostname: derp4b.tailscale.com
|
||||
ipv4: 157.230.25.0
|
||||
ipv6: "2a03:b0c0:3:e0::58f:3001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
5:
|
||||
regionid: 5
|
||||
regioncode: syd
|
||||
regionname: Sydney
|
||||
nodes:
|
||||
- name: 5a
|
||||
regionid: 5
|
||||
hostname: derp5.tailscale.com
|
||||
ipv4: 103.43.75.49
|
||||
ipv6: "2001:19f0:5801:10b7:5400:2ff:feaa:284c"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
6:
|
||||
regionid: 6
|
||||
regioncode: blr
|
||||
regionname: Bangalore
|
||||
nodes:
|
||||
- name: 6a
|
||||
regionid: 6
|
||||
hostname: derp6.tailscale.com
|
||||
ipv4: 68.183.90.120
|
||||
ipv6: "2400:6180:100:d0::982:d001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
7:
|
||||
regionid: 7
|
||||
regioncode: tok
|
||||
regionname: Tokyo
|
||||
nodes:
|
||||
- name: 7a
|
||||
regionid: 7
|
||||
hostname: derp7.tailscale.com
|
||||
ipv4: 167.179.89.145
|
||||
ipv6: "2401:c080:1000:467f:5400:2ff:feee:22aa"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
8:
|
||||
regionid: 8
|
||||
regioncode: lhr
|
||||
regionname: London
|
||||
nodes:
|
||||
- name: 8a
|
||||
regionid: 8
|
||||
hostname: derp8.tailscale.com
|
||||
ipv4: 167.71.139.179
|
||||
ipv6: "2a03:b0c0:1:e0::3cc:e001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
9:
|
||||
regionid: 9
|
||||
regioncode: sao
|
||||
regionname: São Paulo
|
||||
nodes:
|
||||
- name: 9a
|
||||
regionid: 9
|
||||
hostname: derp9.tailscale.com
|
||||
ipv4: 207.148.3.137
|
||||
ipv6: "2001:19f0:6401:1d9c:5400:2ff:feef:bb82"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
|
@@ -1,62 +0,0 @@
|
||||
FROM golang:alpine
|
||||
|
||||
# Set necessary environment variables needed for our image
|
||||
ENV GO111MODULE=on \
|
||||
CGO_ENABLED=0 \
|
||||
GOOS=linux \
|
||||
GOARCH=amd64
|
||||
|
||||
|
||||
|
||||
ENV PATH /usr/lib/postgresql/$PG_MAJOR/bin:$PATH
|
||||
ENV PGDATA /var/lib/postgresql/data
|
||||
ENV POSTGRES_DB headscale
|
||||
ENV POSTGRES_USER admin
|
||||
|
||||
ENV LANG en_US.utf8
|
||||
|
||||
RUN apk update && \
|
||||
apk add git su-exec tzdata libpq postgresql-client postgresql postgresql-contrib gnupg supervisor inotify-tools wireguard-tools openssh && \
|
||||
mkdir /docker-entrypoint-initdb.d && \
|
||||
rm -rf /var/cache/apk/*
|
||||
|
||||
RUN gpg --keyserver ipv4.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4
|
||||
RUN gpg --list-keys --fingerprint --with-colons | sed -E -n -e 's/^fpr:::::::::([0-9A-F]+):$/\1:6:/p' | gpg --import-ownertrust
|
||||
RUN wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/1.7/gosu-amd64" && \
|
||||
wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/1.7/gosu-amd64.asc" && \
|
||||
gpg --verify /usr/local/bin/gosu.asc && \
|
||||
rm /usr/local/bin/gosu.asc && \
|
||||
chmod +x /usr/local/bin/gosu
|
||||
RUN apk --purge del gnupg ca-certificates
|
||||
|
||||
VOLUME /var/lib/postgresql/data
|
||||
|
||||
|
||||
|
||||
|
||||
RUN rm -rf /etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_dsa_key
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
RUN git clone https://github.com/juanfont/headscale.git
|
||||
|
||||
WORKDIR /build/headscale
|
||||
|
||||
RUN go build cmd/headscale/headscale.go
|
||||
|
||||
COPY headscale.sh /headscale.sh
|
||||
COPY postgres.sh /postgres.sh
|
||||
COPY supervisord.conf /etc/supervisord.conf
|
||||
|
||||
WORKDIR /
|
||||
|
||||
RUN mkdir -p /run/postgresql
|
||||
RUN chown postgres:postgres /run/postgresql
|
||||
|
||||
RUN adduser -S headscale
|
||||
|
||||
#ENV GIN_MODE release
|
||||
|
||||
EXPOSE 8000
|
||||
|
||||
CMD ["supervisord","--nodaemon", "--configuration", "/etc/supervisord.conf"]
|
@@ -1,28 +0,0 @@
|
||||
#!/bin/bash
|
||||
cd /build/headscale
|
||||
echo 'Writing config...'
|
||||
echo '''
|
||||
{
|
||||
"server_url": "$SERVER_URL",
|
||||
"listen_addr": "0.0.0.0:8000",
|
||||
"private_key_path": "private.key",
|
||||
"public_key_path": "public.key",
|
||||
"db_host": "localhost",
|
||||
"db_port": 5432,
|
||||
"db_name": "headscale",
|
||||
"db_user": "admin",
|
||||
"db_pass": "$POSTGRES_PASSWORD"
|
||||
}
|
||||
''' > config.json
|
||||
|
||||
# Wait until PostgreSQL started and listens on port 5432.
|
||||
while [ -z "`netstat -tln | grep 5432`" ]; do
|
||||
echo 'Waiting for PostgreSQL to start ...'
|
||||
sleep 1
|
||||
done
|
||||
echo 'PostgreSQL started.'
|
||||
|
||||
# Start server.
|
||||
echo 'Starting server...'
|
||||
|
||||
./headscale
|
@@ -1,58 +0,0 @@
|
||||
#!/bin/sh
|
||||
chown -R postgres "$PGDATA"
|
||||
if [ -z "$(ls -A "$PGDATA")" ]; then
|
||||
gosu postgres initdb
|
||||
sed -ri "s/^#(listen_addresses\s*=\s*)\S+/\1'*'/" "$PGDATA"/postgresql.conf
|
||||
|
||||
: ${POSTGRES_USER:="postgres"}
|
||||
: ${POSTGRES_DB:=$POSTGRES_USER}
|
||||
|
||||
if [ "$POSTGRES_PASSWORD" ]; then
|
||||
pass="PASSWORD '$POSTGRES_PASSWORD'"
|
||||
authMethod=md5
|
||||
else
|
||||
echo "==============================="
|
||||
echo "!!! NO PASSWORD SET !!! (Use \$POSTGRES_PASSWORD env var)"
|
||||
echo "==============================="
|
||||
pass=
|
||||
authMethod=trust
|
||||
fi
|
||||
echo
|
||||
|
||||
|
||||
if [ "$POSTGRES_DB" != 'postgres' ]; then
|
||||
createSql="CREATE DATABASE $POSTGRES_DB;"
|
||||
echo $createSql | gosu postgres postgres --single -jE
|
||||
echo
|
||||
fi
|
||||
|
||||
if [ "$POSTGRES_USER" != 'postgres' ]; then
|
||||
op=CREATE
|
||||
else
|
||||
op=ALTER
|
||||
fi
|
||||
|
||||
userSql="$op USER $POSTGRES_USER WITH SUPERUSER $pass;"
|
||||
echo $userSql | gosu postgres postgres --single -jE
|
||||
echo
|
||||
|
||||
gosu postgres pg_ctl -D "$PGDATA" \
|
||||
-o "-c listen_addresses=''" \
|
||||
-w start
|
||||
|
||||
echo
|
||||
for f in /docker-entrypoint-initdb.d/*; do
|
||||
case "$f" in
|
||||
*.sh) echo "$0: running $f"; . "$f" ;;
|
||||
*.sql) echo "$0: running $f"; psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < "$f" && echo ;;
|
||||
*) echo "$0: ignoring $f" ;;
|
||||
esac
|
||||
echo
|
||||
done
|
||||
|
||||
gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop
|
||||
|
||||
{ echo; echo "host all all 0.0.0.0/0 $authMethod"; } >> "$PGDATA"/pg_hba.conf
|
||||
fi
|
||||
|
||||
exec gosu postgres postgres
|
@@ -1,4 +0,0 @@
|
||||
# Example of how to use the docker image
|
||||
POSTGRES_PASSWORD=
|
||||
docker build . -t headscale-docker
|
||||
docker run -p 8000:8000 -v $(pwd)/pgdata:/var/lib/postgresql/data -v "$(pwd)/private.key:/build/headscale/private.key" -v "$(pwd)/public.key:/build/headscale/public.key" -e SERVER_URL=127.0.0.1:8000 -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD -ti headscale-docker
|
@@ -1,13 +0,0 @@
|
||||
[supervisord]
|
||||
nodaemon=true
|
||||
user = root
|
||||
|
||||
[program:headscale]
|
||||
command=/bin/bash -c "/headscale.sh"
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
||||
|
||||
[program:postgres]
|
||||
command=/bin/bash -c "/postgres.sh"
|
||||
stdout_logfile=/dev/stdout
|
||||
stdout_logfile_maxbytes=0
|
go.mod
@@ -3,26 +3,36 @@ module github.com/juanfont/headscale
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
||||
github.com/gin-gonic/gin v1.7.1
|
||||
github.com/hako/durafmt v0.0.0-20210316092057-3a2c319c1acd
|
||||
github.com/jinzhu/gorm v1.9.16
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/klauspost/compress v1.12.2
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/lib/pq v1.10.1 // indirect
|
||||
github.com/AlecAivazis/survey/v2 v2.0.5
|
||||
github.com/Microsoft/go-winio v0.5.0 // indirect
|
||||
github.com/cenkalti/backoff/v3 v3.0.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
|
||||
github.com/containerd/continuity v0.1.0 // indirect
|
||||
github.com/docker/cli v20.10.8+incompatible // indirect
|
||||
github.com/docker/docker v20.10.8+incompatible // indirect
|
||||
github.com/efekarakus/termcolor v1.0.1 // indirect
|
||||
github.com/gin-gonic/gin v1.7.2
|
||||
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b
|
||||
github.com/klauspost/compress v1.13.1
|
||||
github.com/lib/pq v1.10.2 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.7 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/opencontainers/runc v1.0.1 // indirect
|
||||
github.com/ory/dockertest/v3 v3.7.0 // indirect
|
||||
github.com/rs/zerolog v1.23.0 // indirect
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/viper v1.7.1
|
||||
github.com/stretchr/testify v1.7.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
github.com/spf13/viper v1.8.1
|
||||
github.com/tailscale/hujson v0.0.0-20200924210142-dde312d0d6a2
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gorm.io/datatypes v1.0.1
|
||||
inet.af/netaddr v0.0.0-20210511181906-37180328850c
|
||||
tailscale.com v1.6.0
|
||||
|
||||
gorm.io/driver/postgres v1.1.0
|
||||
gorm.io/driver/sqlite v1.1.4
|
||||
gorm.io/gorm v1.21.11
|
||||
inet.af/netaddr v0.0.0-20210603230628-bf05d8b52dda
|
||||
tailscale.com v1.10.0
|
||||
)
|
||||
|
integration_test.go
@@ -0,0 +1,246 @@
|
||||
// +build integration
|
||||
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"inet.af/netaddr"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
var _ = check.Suite(&IntegrationSuite{})
|
||||
|
||||
type IntegrationSuite struct{}
|
||||
|
||||
var integrationTmpDir string
|
||||
var ih Headscale
|
||||
|
||||
var pool dockertest.Pool
|
||||
var network dockertest.Network
|
||||
var headscale dockertest.Resource
|
||||
var tailscaleCount int = 10
|
||||
var tailscales map[string]dockertest.Resource
|
||||
|
||||
func executeCommand(resource *dockertest.Resource, cmd []string) (string, error) {
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
exitCode, err := resource.Exec(
|
||||
cmd,
|
||||
dockertest.ExecOptions{
|
||||
StdOut: &stdout,
|
||||
StdErr: &stderr,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if exitCode != 0 {
|
||||
fmt.Println("Command: ", cmd)
|
||||
fmt.Println("stdout: ", stdout.String())
|
||||
fmt.Println("stderr: ", stderr.String())
|
||||
return "", fmt.Errorf("command failed with: %s", stderr.String())
|
||||
}
|
||||
|
||||
return stdout.String(), nil
|
||||
}
|
||||
|
||||
func dockerRestartPolicy(config *docker.HostConfig) {
|
||||
// set AutoRemove to true so that stopped container goes away by itself
|
||||
config.AutoRemove = true
|
||||
config.RestartPolicy = docker.RestartPolicy{
|
||||
Name: "no",
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationSuite) SetUpSuite(c *check.C) {
|
||||
var err error
|
||||
h = Headscale{
|
||||
dbType: "sqlite3",
|
||||
dbString: "integration_test_db.sqlite3",
|
||||
}
|
||||
|
||||
if ppool, err := dockertest.NewPool(""); err == nil {
|
||||
pool = *ppool
|
||||
} else {
|
||||
log.Fatalf("Could not connect to docker: %s", err)
|
||||
}
|
||||
|
||||
if pnetwork, err := pool.CreateNetwork("headscale-test"); err == nil {
|
||||
network = *pnetwork
|
||||
} else {
|
||||
log.Fatalf("Could not create network: %s", err)
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile",
|
||||
ContextDir: ".",
|
||||
}
|
||||
|
||||
tailscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.tailscale",
|
||||
ContextDir: ".",
|
||||
}
|
||||
|
||||
currentPath, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not determine current path: %s", err)
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: "headscale",
|
||||
Mounts: []string{
|
||||
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
|
||||
fmt.Sprintf("%s/derp.yaml:/etc/headscale/derp.yaml", currentPath),
|
||||
},
|
||||
Networks: []*dockertest.Network{&network},
|
||||
// Cmd: []string{"sleep", "3600"},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8080/tcp": []docker.PortBinding{{HostPort: "8080"}},
|
||||
},
|
||||
Env: []string{},
|
||||
}
|
||||
|
||||
fmt.Println("Creating headscale container")
|
||||
if pheadscale, err := pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, dockerRestartPolicy); err == nil {
|
||||
headscale = *pheadscale
|
||||
} else {
|
||||
log.Fatalf("Could not start resource: %s", err)
|
||||
}
|
||||
fmt.Println("Created headscale container")
|
||||
|
||||
fmt.Println("Creating tailscale containers")
|
||||
tailscales = make(map[string]dockertest.Resource)
|
||||
for i := 0; i < tailscaleCount; i++ {
|
||||
hostname := fmt.Sprintf("tailscale%d", i)
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
			Networks: []*dockertest.Network{&network},
			// Make the container run until killed
			// Cmd: []string{"sleep", "3600"},
			Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
			Env: []string{},
		}

		if pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy); err == nil {
			tailscales[hostname] = *pts
		} else {
			log.Fatalf("Could not start resource: %s", err)
		}
		fmt.Printf("Created %s container\n", hostname)
	}

	fmt.Println("Waiting for headscale to be ready")
	hostEndpoint := fmt.Sprintf("localhost:%s", headscale.GetPort("8080/tcp"))

	if err := pool.Retry(func() error {
		url := fmt.Sprintf("http://%s/health", hostEndpoint)
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("status code not OK")
		}
		return nil
	}); err != nil {
		log.Fatalf("Could not connect to docker: %s", err)
	}
	fmt.Println("headscale container is ready")

	fmt.Println("Creating headscale namespace")
	result, err := executeCommand(
		&headscale,
		[]string{"headscale", "namespaces", "create", "test"},
	)
	c.Assert(err, check.IsNil)

	fmt.Println("Creating pre auth key")
	authKey, err := executeCommand(
		&headscale,
		[]string{"headscale", "-n", "test", "preauthkeys", "create", "--reusable", "--expiration", "24h"},
	)
	c.Assert(err, check.IsNil)

	headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))

	fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
	for hostname, tailscale := range tailscales {
		command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}

		fmt.Println("Join command:", command)
		fmt.Printf("Running join command for %s\n", hostname)
		result, err = executeCommand(
			&tailscale,
			command,
		)
		fmt.Println("tailscale result: ", result)
		c.Assert(err, check.IsNil)
		fmt.Printf("%s joined\n", hostname)
	}
}

func (s *IntegrationSuite) TearDownSuite(c *check.C) {
	if err := pool.Purge(&headscale); err != nil {
		log.Printf("Could not purge resource: %s\n", err)
	}

	for _, tailscale := range tailscales {
		if err := pool.Purge(&tailscale); err != nil {
			log.Printf("Could not purge resource: %s\n", err)
		}
	}

	if err := network.Close(); err != nil {
		log.Printf("Could not close network: %s\n", err)
	}
}

func (s *IntegrationSuite) TestListNodes(c *check.C) {
	fmt.Println("Listing nodes")
	result, err := executeCommand(
		&headscale,
		[]string{"headscale", "-n", "test", "nodes", "list"},
	)
	c.Assert(err, check.IsNil)

	for hostname, _ := range tailscales {
		c.Assert(strings.Contains(result, hostname), check.Equals, true)
	}
}

func (s *IntegrationSuite) TestGetIpAddresses(c *check.C) {
	ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
	ips := make(map[string]netaddr.IP)
	for hostname, tailscale := range tailscales {
		command := []string{"tailscale", "ip"}

		result, err := executeCommand(
			&tailscale,
			command,
		)
		c.Assert(err, check.IsNil)

		ip, err := netaddr.ParseIP(strings.TrimSuffix(result, "\n"))
		c.Assert(err, check.IsNil)

		fmt.Printf("IP for %s: %s", hostname, result)

		// c.Assert(ip.Valid(), check.IsTrue)
		c.Assert(ip.Is4(), check.Equals, true)
		c.Assert(ipPrefix.Contains(ip), check.Equals, true)

		ips[hostname] = ip
	}
}
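The suite above waits for the headscale container by polling its `/health` endpoint through `pool.Retry`. A minimal standalone sketch of the same readiness check, using only the Go standard library; the helper name and timeout are illustrative and not part of the test suite:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHealthy polls url until it returns 200 OK or the timeout expires.
// Hypothetical helper; the integration suite uses dockertest's pool.Retry instead.
func waitForHealthy(url string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("%s did not become healthy within %s", url, timeout)
}

func main() {
	if err := waitForHealthy("http://localhost:8080/health", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}
```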
|
integration_test/.gitignore (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
derp.yaml
*.sqlite
*.sqlite3

integration_test/etc/config.json (new file, 11 lines)
@@ -0,0 +1,11 @@
{
  "server_url": "http://headscale:8080",
  "listen_addr": "0.0.0.0:8080",
  "private_key_path": "private.key",
  "derp_map_path": "derp.yaml",
  "ephemeral_node_inactivity_timeout": "30m",
  "db_type": "sqlite3",
  "db_path": "/tmp/integration_test_db.sqlite3",
  "acl_policy_path": "",
  "log_level": "trace"
}
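For illustration, a hedged sketch of decoding a config file with these keys using `encoding/json`; the struct and its field names are assumptions for this example and not headscale's actual configuration type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// testConfig mirrors the keys in integration_test/etc/config.json.
// Field names are assumptions for illustration only.
type testConfig struct {
	ServerURL                      string `json:"server_url"`
	ListenAddr                     string `json:"listen_addr"`
	PrivateKeyPath                 string `json:"private_key_path"`
	DERPMapPath                    string `json:"derp_map_path"`
	EphemeralNodeInactivityTimeout string `json:"ephemeral_node_inactivity_timeout"`
	DBType                         string `json:"db_type"`
	DBPath                         string `json:"db_path"`
	ACLPolicyPath                  string `json:"acl_policy_path"`
	LogLevel                       string `json:"log_level"`
}

func main() {
	raw, err := os.ReadFile("integration_test/etc/config.json")
	if err != nil {
		panic(err)
	}
	var cfg testConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ServerURL, cfg.DBType)
}
```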
integration_test/etc/private.key (new file, 1 line)
@@ -0,0 +1 @@
SEmQwCu+tGywQWEUsf93TpTRUvlB7WhnCdHgWrSXjEA=

k8s/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
/**/site
/**/secrets
k8s/README.md (new file, 98 lines)
@@ -0,0 +1,98 @@
# Deploying Headscale on Kubernetes

This directory contains [Kustomize](https://kustomize.io) templates that deploy
Headscale in various configurations.

These templates currently support Rancher k3s. Other clusters may require
adaptation, especially around volume claims and ingress.

Commands below assume this directory is your current working directory.

# Generate secrets and site configuration

Run `./init.bash` to generate keys, passwords, and site configuration files.

Edit `base/site/public.env`, changing `public-hostname` to the public DNS name
that will be used for your headscale deployment.

Set `public-proto` to "https" if you're planning to use TLS & Let's Encrypt.

Configure DERP servers by editing `base/site/derp.yaml` if needed.

# Add the image to the registry

You'll somehow need to get `headscale:latest` into your cluster image registry.

An easy way to do this with k3s:
- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`)
- `docker build -t headscale:latest ..` from here

# Create the namespace

If it doesn't already exist, `kubectl create ns headscale`.

# Deploy headscale

## sqlite

`kubectl -n headscale apply -k ./sqlite`

## postgres

`kubectl -n headscale apply -k ./postgres`

# TLS & Let's Encrypt

Test a staging certificate with your configured DNS name and Let's Encrypt.

`kubectl -n headscale apply -k ./staging-tls`

Replace with a production certificate.

`kubectl -n headscale apply -k ./production-tls`

## Static / custom TLS certificates

Only Let's Encrypt is supported. If you need other TLS settings, modify or patch the ingress.

# Administration

Use the wrapper script to operate headscale remotely and perform administrative
tasks such as creating namespaces and authkeys.

```
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash

headscale is an open source implementation of the Tailscale control server

https://gitlab.com/juanfont/headscale

Usage:
  headscale [command]

Available Commands:
  help        Help about any command
  namespace   Manage the namespaces of Headscale
  node        Manage the nodes of Headscale
  preauthkey  Handle the preauthkeys in Headscale
  routes      Manage the routes of Headscale
  serve       Launches the headscale server
  version     Print the version.

Flags:
  -h, --help            help for headscale
  -o, --output string   Output format. Empty for human-readable, 'json' or 'json-line'

Use "headscale [command] --help" for more information about a command.
```
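The same administration calls can also be driven from Go rather than bash; a minimal sketch that shells out to `kubectl exec` the same way `headscale.bash` does (the `runHeadscale` helper is illustrative):

```go
package main

import (
	"fmt"
	"os/exec"
)

// runHeadscale runs a headscale CLI command inside the headscale-0 pod,
// mirroring k8s/headscale.bash. Illustrative only.
func runHeadscale(args ...string) (string, error) {
	base := []string{"-n", "headscale", "exec", "-i", "pod/headscale-0", "--", "/go/bin/headscale"}
	out, err := exec.Command("kubectl", append(base, args...)...).CombinedOutput()
	return string(out), err
}

func main() {
	out, err := runHeadscale("namespaces", "create", "test")
	if err != nil {
		fmt.Println("headscale failed:", err)
	}
	fmt.Print(out)
}
```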
# TODO / Ideas

- GitHub action to publish the docker image
- Interpolate `email:` option to the ClusterIssuer from site configuration.
  This probably needs to be done with a transformer; kustomize vars don't seem to work.
- Add kustomize examples for cloud-native ingress, load balancer
- CockroachDB for the backend
- DERP server deployment
- Tor hidden service
k8s/base/configmap.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: headscale-config
data:
  server_url: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
  listen_addr: "0.0.0.0:8080"
  ephemeral_node_inactivity_timeout: "30m"

k8s/base/ingress.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: headscale
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
  - host: $(PUBLIC_HOSTNAME)
    http:
      paths:
      - backend:
          service:
            name: headscale
            port:
              number: 8080
        path: /
        pathType: Prefix

k8s/base/kustomization.yaml (new file, 42 lines)
@@ -0,0 +1,42 @@
namespace: headscale
resources:
- configmap.yaml
- ingress.yaml
- service.yaml
generatorOptions:
  disableNameSuffixHash: true
configMapGenerator:
- name: headscale-site
  files:
  - derp.yaml=site/derp.yaml
  envs:
  - site/public.env
- name: headscale-etc
  literals:
  - config.json={}
secretGenerator:
- name: headscale
  files:
  - secrets/private-key
vars:
- name: PUBLIC_PROTO
  objRef:
    kind: ConfigMap
    name: headscale-site
    apiVersion: v1
  fieldRef:
    fieldPath: data.public-proto
- name: PUBLIC_HOSTNAME
  objRef:
    kind: ConfigMap
    name: headscale-site
    apiVersion: v1
  fieldRef:
    fieldPath: data.public-hostname
- name: CONTACT_EMAIL
  objRef:
    kind: ConfigMap
    name: headscale-site
    apiVersion: v1
  fieldRef:
    fieldPath: data.contact-email

k8s/base/service.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: headscale
  labels:
    app: headscale
spec:
  selector:
    app: headscale
  ports:
  - name: http
    targetPort: http
    port: 8080
k8s/headscale.bash (executable, new file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash
set -eu
exec kubectl -n headscale exec -ti pod/headscale-0 -- /go/bin/headscale "$@"

k8s/init.bash (executable, new file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -eux
cd $(dirname $0)

umask 022
mkdir -p base/site/
[ ! -e base/site/public.env ] && (
cat >base/site/public.env <<EOF
public-hostname=localhost
public-proto=http
contact-email=headscale@example.com
EOF
)
[ ! -e base/site/derp.yaml ] && cp ../derp.yaml base/site/derp.yaml

umask 077
mkdir -p base/secrets/
[ ! -e base/secrets/private-key ] && (
wg genkey > base/secrets/private-key
)
mkdir -p postgres/secrets/
[ ! -e postgres/secrets/password ] && (head -c 32 /dev/urandom | base64 -w0 > postgres/secrets/password)
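`init.bash` derives the Postgres password from 32 bytes of `/dev/urandom`, base64-encoded. A hedged Go equivalent using `crypto/rand` (the `randomSecret` helper is illustrative, not part of the repository):

```go
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// randomSecret returns n random bytes encoded as base64, the same shape of
// value that init.bash writes to postgres/secrets/password.
func randomSecret(n int) (string, error) {
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
	s, err := randomSecret(32)
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}
```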
k8s/install-cert-manager.bash (executable, new file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash
set -eux
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.4.0/cert-manager.yaml
k8s/postgres/deployment.yaml (new file, 78 lines)
@@ -0,0 +1,78 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: headscale
spec:
  replicas: 2
  selector:
    matchLabels:
      app: headscale
  template:
    metadata:
      labels:
        app: headscale
    spec:
      containers:
      - name: headscale
        image: "headscale:latest"
        imagePullPolicy: IfNotPresent
        command: ["/go/bin/headscale", "serve"]
        env:
        - name: SERVER_URL
          value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
        - name: LISTEN_ADDR
          valueFrom:
            configMapKeyRef:
              name: headscale-config
              key: listen_addr
        - name: PRIVATE_KEY_PATH
          value: /vol/secret/private-key
        - name: DERP_MAP_PATH
          value: /vol/config/derp.yaml
        - name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
          valueFrom:
            configMapKeyRef:
              name: headscale-config
              key: ephemeral_node_inactivity_timeout
        - name: DB_TYPE
          value: postgres
        - name: DB_HOST
          value: postgres.headscale.svc.cluster.local
        - name: DB_PORT
          value: "5432"
        - name: DB_USER
          value: headscale
        - name: DB_PASS
          valueFrom:
            secretKeyRef:
              name: postgresql
              key: password
        - name: DB_NAME
          value: headscale
        ports:
        - name: http
          protocol: TCP
          containerPort: 8080
        livenessProbe:
          tcpSocket:
            port: http
          initialDelaySeconds: 30
          timeoutSeconds: 5
          periodSeconds: 15
        volumeMounts:
        - name: config
          mountPath: /vol/config
        - name: secret
          mountPath: /vol/secret
        - name: etc
          mountPath: /etc/headscale
      volumes:
      - name: config
        configMap:
          name: headscale-site
      - name: etc
        configMap:
          name: headscale-etc
      - name: secret
        secret:
          secretName: headscale
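The deployment injects every setting through environment variables such as `SERVER_URL` and `DB_HOST`. A minimal sketch of reading such variables with a fallback default; headscale's actual configuration loading may differ, and the helper below is illustrative:

```go
package main

import (
	"fmt"
	"os"
)

// getenvDefault returns the value of key or a default when it is unset.
// Illustrative helper; not the project's actual configuration code.
func getenvDefault(key, def string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return def
}

func main() {
	serverURL := getenvDefault("SERVER_URL", "http://localhost:8080")
	dbHost := getenvDefault("DB_HOST", "localhost")
	fmt.Println("server:", serverURL, "db:", dbHost)
}
```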
k8s/postgres/kustomization.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- postgres-service.yaml
|
||||
- postgres-statefulset.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
secretGenerator:
|
||||
- name: postgresql
|
||||
files:
|
||||
- secrets/password
|
k8s/postgres/postgres-service.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: postgres
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
selector:
|
||||
app: postgres
|
||||
ports:
|
||||
- name: postgres
|
||||
targetPort: postgres
|
||||
port: 5432
|
k8s/postgres/postgres-statefulset.yaml (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
serviceName: postgres
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
containers:
|
||||
- name: postgres
|
||||
image: "postgres:13"
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgresql
|
||||
key: password
|
||||
- name: POSTGRES_USER
|
||||
value: headscale
|
||||
ports:
|
||||
- name: postgres
|
||||
protocol: TCP
|
||||
containerPort: 5432
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 5432
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: pgdata
|
||||
mountPath: /var/lib/postgresql/data
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: pgdata
|
||||
spec:
|
||||
storageClassName: local-path
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
k8s/production-tls/ingress-patch.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: headscale
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: letsencrypt-production
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: production-cert
|
k8s/production-tls/kustomization.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- production-issuer.yaml
|
||||
patches:
|
||||
- path: ingress-patch.yaml
|
||||
target:
|
||||
kind: Ingress
|
k8s/production-tls/production-issuer.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-production
|
||||
spec:
|
||||
acme:
|
||||
# TODO: figure out how to get kustomize to interpolate this, or use a transformer
|
||||
#email: $(CONTACT_EMAIL)
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
# Secret resource used to store the account's private key.
|
||||
name: letsencrypt-production-acc-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
k8s/sqlite/kustomization.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- statefulset.yaml
|
k8s/sqlite/statefulset.yaml (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: headscale
|
||||
spec:
|
||||
serviceName: headscale
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: headscale
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: headscale
|
||||
spec:
|
||||
containers:
|
||||
- name: headscale
|
||||
image: "headscale:latest"
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/go/bin/headscale", "serve"]
|
||||
env:
|
||||
- name: SERVER_URL
|
||||
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
|
||||
- name: LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: PRIVATE_KEY_PATH
|
||||
value: /vol/secret/private-key
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: ephemeral_node_inactivity_timeout
|
||||
- name: DB_TYPE
|
||||
value: sqlite3
|
||||
- name: DB_PATH
|
||||
value: /vol/data/db.sqlite
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
containerPort: 8080
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /vol/config
|
||||
- name: data
|
||||
mountPath: /vol/data
|
||||
- name: secret
|
||||
mountPath: /vol/secret
|
||||
- name: etc
|
||||
mountPath: /etc/headscale
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: headscale-site
|
||||
- name: etc
|
||||
configMap:
|
||||
name: headscale-etc
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: headscale
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
storageClassName: local-path
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
k8s/staging-tls/ingress-patch.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: headscale
|
||||
annotations:
|
||||
cert-manager.io/cluster-issuer: letsencrypt-staging
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: staging-cert
|
k8s/staging-tls/kustomization.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- staging-issuer.yaml
|
||||
patches:
|
||||
- path: ingress-patch.yaml
|
||||
target:
|
||||
kind: Ingress
|
k8s/staging-tls/staging-issuer.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: ClusterIssuer
|
||||
metadata:
|
||||
name: letsencrypt-staging
|
||||
spec:
|
||||
acme:
|
||||
# TODO: figure out how to get kustomize to interpolate this, or use a transformer
|
||||
#email: $(CONTACT_EMAIL)
|
||||
server: https://acme-staging-v02.api.letsencrypt.org/directory
|
||||
privateKeySecretRef:
|
||||
# Secret resource used to store the account's private key.
|
||||
name: letsencrypt-staging-acc-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
machine.go (58 lines changed)
@@ -3,15 +3,16 @@ package headscale
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"gorm.io/datatypes"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/wgengine/wgcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
)
|
||||
|
||||
// Machine is a Headscale client
|
||||
@@ -23,7 +24,7 @@ type Machine struct {
|
||||
IPAddress string
|
||||
Name string
|
||||
NamespaceID uint
|
||||
Namespace Namespace
|
||||
Namespace Namespace `gorm:"foreignKey:NamespaceID"`
|
||||
|
||||
Registered bool // temp
|
||||
RegisterMethod string
|
||||
@@ -48,18 +49,18 @@ func (m Machine) isAlreadyRegistered() bool {
|
||||
}
|
||||
|
||||
func (m Machine) toNode() (*tailcfg.Node, error) {
|
||||
nKey, err := wgcfg.ParseHexKey(m.NodeKey)
|
||||
nKey, err := wgkey.ParseHex(m.NodeKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mKey, err := wgcfg.ParseHexKey(m.MachineKey)
|
||||
mKey, err := wgkey.ParseHex(m.MachineKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var discoKey tailcfg.DiscoKey
|
||||
if m.DiscoKey != "" {
|
||||
dKey, err := wgcfg.ParseHexKey(m.DiscoKey)
|
||||
dKey, err := wgkey.ParseHex(m.DiscoKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -71,6 +72,10 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
|
||||
addrs := []netaddr.IPPrefix{}
|
||||
ip, err := netaddr.ParseIPPrefix(fmt.Sprintf("%s/32", m.IPAddress))
|
||||
if err != nil {
|
||||
log.Trace().
|
||||
Str("func", "toNode").
|
||||
Str("ip", m.IPAddress).
|
||||
Msgf("Failed to parse IP Prefix from IP: %s", m.IPAddress)
|
||||
return nil, err
|
||||
}
|
||||
addrs = append(addrs, ip) // missing the ipv6 ?
|
||||
@@ -154,17 +159,10 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
|
||||
}
|
||||
|
||||
func (h *Headscale) getPeers(m Machine) (*[]*tailcfg.Node, error) {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
machines := []Machine{}
|
||||
if err = db.Where("namespace_id = ? AND machine_key <> ? AND registered",
|
||||
if err := h.db.Where("namespace_id = ? AND machine_key <> ? AND registered",
|
||||
m.NamespaceID, m.MachineKey).Find(&machines).Error; err != nil {
|
||||
log.Printf("Error accessing db: %s", err)
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -195,6 +193,36 @@ func (h *Headscale) GetMachine(namespace string, name string) (*Machine, error)
|
||||
return nil, fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
// GetMachineByID finds a Machine by ID and returns the Machine struct
|
||||
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
|
||||
m := Machine{}
|
||||
if result := h.db.Find(&Machine{ID: id}).First(&m); result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
return &m, nil
|
||||
}
|
||||
|
||||
// DeleteMachine soft deletes a Machine from the database
|
||||
func (h *Headscale) DeleteMachine(m *Machine) error {
|
||||
m.Registered = false
|
||||
namespaceID := m.NamespaceID
|
||||
h.db.Save(&m) // we mark it as unregistered, just in case
|
||||
if err := h.db.Delete(&m).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return h.RequestMapUpdates(namespaceID)
|
||||
}
|
||||
|
||||
// HardDeleteMachine hard deletes a Machine from the database
|
||||
func (h *Headscale) HardDeleteMachine(m *Machine) error {
|
||||
namespaceID := m.NamespaceID
|
||||
if err := h.db.Unscoped().Delete(&m).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
return h.RequestMapUpdates(namespaceID)
|
||||
}
|
||||
|
||||
// GetHostInfo returns a Hostinfo struct for the machine
|
||||
func (m *Machine) GetHostInfo() (*tailcfg.Hostinfo, error) {
|
||||
hostinfo := tailcfg.Hostinfo{}
|
||||
|
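`toNode` above converts the machine's stored address into a single /32 prefix via `netaddr.ParseIPPrefix`. That conversion in isolation, as a small sketch using the same `inet.af/netaddr` call the file imports (the `hostPrefix` helper is illustrative):

```go
package main

import (
	"fmt"

	"inet.af/netaddr"
)

// hostPrefix converts a stored IPv4 address string into the /32 prefix that
// toNode adds to the node's address list. Sketch only.
func hostPrefix(addr string) (netaddr.IPPrefix, error) {
	return netaddr.ParseIPPrefix(fmt.Sprintf("%s/32", addr))
}

func main() {
	p, err := hostPrefix("100.64.0.1")
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // 100.64.0.1/32
}
```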
@@ -1,6 +1,8 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
@@ -11,12 +13,6 @@ func (s *Suite) TestGetMachine(c *check.C) {
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
_, err = h.GetMachine("test", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
@@ -31,12 +27,92 @@ func (s *Suite) TestGetMachine(c *check.C) {
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
m1, err := h.GetMachine("test", "testmachine")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = m1.GetHostInfo()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
}
|
||||
|
||||
func (s *Suite) TestGetMachineByID(c *check.C) {
|
||||
n, err := h.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachineByID(0)
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
|
||||
m1, err := h.GetMachineByID(0)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = m1.GetHostInfo()
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDeleteMachine(c *check.C) {
|
||||
n, err := h.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(1),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
err = h.DeleteMachine(&m)
|
||||
c.Assert(err, check.IsNil)
|
||||
v, err := h.getValue("namespaces_pending_updates")
|
||||
c.Assert(err, check.IsNil)
|
||||
names := []string{}
|
||||
err = json.Unmarshal([]byte(v), &names)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(names, check.DeepEquals, []string{n.Name})
|
||||
h.checkForNamespacesPendingUpdates()
|
||||
v, _ = h.getValue("namespaces_pending_updates")
|
||||
c.Assert(v, check.Equals, "")
|
||||
_, err = h.GetMachine(n.Name, "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestHardDeleteMachine(c *check.C) {
|
||||
n, err := h.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine3",
|
||||
NamespaceID: n.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(1),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
err = h.HardDeleteMachine(&m)
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = h.GetMachine(n.Name, "testmachine3")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
namespaces.go (169 lines changed)
@@ -1,10 +1,13 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"log"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/jinzhu/gorm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
@@ -24,20 +27,16 @@ type Namespace struct {
|
||||
// CreateNamespace creates a new Namespace. Returns an error if it could not be
// created or if another namespace with the same name already exists
|
||||
func (h *Headscale) CreateNamespace(name string) (*Namespace, error) {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
n := Namespace{}
|
||||
if err := db.Where("name = ?", name).First(&n).Error; err == nil {
|
||||
if err := h.db.Where("name = ?", name).First(&n).Error; err == nil {
|
||||
return nil, errorNamespaceExists
|
||||
}
|
||||
n.Name = name
|
||||
if err := db.Create(&n).Error; err != nil {
|
||||
log.Printf("Could not create row: %s", err)
|
||||
if err := h.db.Create(&n).Error; err != nil {
|
||||
log.Error().
|
||||
Str("func", "CreateNamespace").
|
||||
Err(err).
|
||||
Msg("Could not create row")
|
||||
return nil, err
|
||||
}
|
||||
return &n, nil
|
||||
@@ -46,13 +45,6 @@ func (h *Headscale) CreateNamespace(name string) (*Namespace, error) {
|
||||
// DestroyNamespace destroys a Namespace. Returns error if the Namespace does
|
||||
// not exist or if there are machines associated with it.
|
||||
func (h *Headscale) DestroyNamespace(name string) error {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
n, err := h.GetNamespace(name)
|
||||
if err != nil {
|
||||
return errorNamespaceNotFound
|
||||
@@ -66,8 +58,7 @@ func (h *Headscale) DestroyNamespace(name string) error {
|
||||
return errorNamespaceNotEmpty
|
||||
}
|
||||
|
||||
err = db.Unscoped().Delete(&n).Error
|
||||
if err != nil {
|
||||
if result := h.db.Unscoped().Delete(&n); result.Error != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -76,15 +67,8 @@ func (h *Headscale) DestroyNamespace(name string) error {
|
||||
|
||||
// GetNamespace fetches a namespace by name
|
||||
func (h *Headscale) GetNamespace(name string) (*Namespace, error) {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
n := Namespace{}
|
||||
if db.First(&n, "name = ?", name).RecordNotFound() {
|
||||
if result := h.db.First(&n, "name = ?", name); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return nil, errorNamespaceNotFound
|
||||
}
|
||||
return &n, nil
|
||||
@@ -92,14 +76,8 @@ func (h *Headscale) GetNamespace(name string) (*Namespace, error) {
|
||||
|
||||
// ListNamespaces gets all the existing namespaces
|
||||
func (h *Headscale) ListNamespaces() (*[]Namespace, error) {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
namespaces := []Namespace{}
|
||||
if err := db.Find(&namespaces).Error; err != nil {
|
||||
if err := h.db.Find(&namespaces).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &namespaces, nil
|
||||
@@ -111,15 +89,9 @@ func (h *Headscale) ListMachinesInNamespace(name string) (*[]Machine, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
machines := []Machine{}
|
||||
if err := db.Preload("AuthKey").Where(&Machine{NamespaceID: n.ID}).Find(&machines).Error; err != nil {
|
||||
if err := h.db.Preload("AuthKey").Where(&Machine{NamespaceID: n.ID}).Find(&machines).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &machines, nil
|
||||
@@ -131,26 +103,117 @@ func (h *Headscale) SetMachineNamespace(m *Machine, namespaceName string) error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
m.NamespaceID = n.ID
|
||||
h.db.Save(&m)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RequestMapUpdates signals the KV worker to update the maps for this namespace
|
||||
func (h *Headscale) RequestMapUpdates(namespaceID uint) error {
|
||||
namespace := Namespace{}
|
||||
if err := h.db.First(&namespace, namespaceID).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
defer db.Close()
|
||||
m.NamespaceID = n.ID
|
||||
db.Save(&m)
|
||||
return nil
|
||||
|
||||
v, err := h.getValue("namespaces_pending_updates")
|
||||
if err != nil || v == "" {
|
||||
err = h.setValue("namespaces_pending_updates", fmt.Sprintf(`["%s"]`, namespace.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
names := []string{}
|
||||
err = json.Unmarshal([]byte(v), &names)
|
||||
if err != nil {
|
||||
err = h.setValue("namespaces_pending_updates", fmt.Sprintf(`["%s"]`, namespace.Name))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
names = append(names, namespace.Name)
|
||||
data, err := json.Marshal(names)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "RequestMapUpdates").
|
||||
Err(err).
|
||||
Msg("Could not marshal namespaces_pending_updates")
|
||||
return err
|
||||
}
|
||||
return h.setValue("namespaces_pending_updates", string(data))
|
||||
}
|
||||
|
||||
func (h *Headscale) checkForNamespacesPendingUpdates() {
|
||||
v, err := h.getValue("namespaces_pending_updates")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v == "" {
|
||||
return
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
err = json.Unmarshal([]byte(v), &names)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, name := range names {
|
||||
log.Trace().
|
||||
Str("func", "RequestMapUpdates").
|
||||
Str("machine", name).
|
||||
Msg("Sending updates to nodes in namespace")
|
||||
machines, err := h.ListMachinesInNamespace(name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
for _, m := range *machines {
|
||||
peers, _ := h.getPeers(m)
|
||||
for _, p := range *peers {
|
||||
pUp, ok := h.clientsPolling.Load(uint64(p.ID))
|
||||
if ok {
|
||||
log.Info().
|
||||
Str("func", "checkForNamespacesPendingUpdates").
|
||||
Str("machine", m.Name).
|
||||
Str("peer", m.Name).
|
||||
Str("address", p.Addresses[0].String()).
|
||||
Msgf("Notifying peer %s (%s)", p.Name, p.Addresses[0])
|
||||
pUp.(chan []byte) <- []byte{}
|
||||
} else {
|
||||
log.Info().
|
||||
Str("func", "checkForNamespacesPendingUpdates").
|
||||
Str("machine", m.Name).
|
||||
Str("peer", m.Name).
|
||||
Msgf("Peer %s does not appear to be polling", p.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
newV, err := h.getValue("namespaces_pending_updates")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if v == newV { // only clear when no changes, so we notified everybody
|
||||
err = h.setValue("namespaces_pending_updates", "")
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "checkForNamespacesPendingUpdates").
|
||||
Err(err).
|
||||
Msg("Could not save to KV")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Namespace) toUser() *tailcfg.User {
|
||||
u := tailcfg.User{
|
||||
ID: tailcfg.UserID(n.ID),
|
||||
LoginName: "",
|
||||
LoginName: n.Name,
|
||||
DisplayName: n.Name,
|
||||
ProfilePicURL: "",
|
||||
Domain: "",
|
||||
Domain: "headscale.net",
|
||||
Logins: []tailcfg.LoginID{},
|
||||
Roles: []tailcfg.RoleID{},
|
||||
Created: time.Time{},
|
||||
}
|
||||
return &u
|
||||
|
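`RequestMapUpdates` tracks namespaces with pending updates as a JSON-encoded list of names in the KV table. A self-contained sketch of that append-or-reset logic; the KV store is replaced by a plain string here and the helper name is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// appendPending adds name to a JSON-encoded list of namespace names, starting
// a fresh list when the stored value is empty or unparsable, mirroring the
// logic in RequestMapUpdates. Sketch only; the real code reads and writes the KV table.
func appendPending(stored, name string) (string, error) {
	names := []string{}
	if stored != "" {
		if err := json.Unmarshal([]byte(stored), &names); err != nil {
			names = []string{}
		}
	}
	names = append(names, name)
	out, err := json.Marshal(names)
	if err != nil {
		return "", err
	}
	return string(out), nil
}

func main() {
	v, _ := appendPending("", "test")
	v, _ = appendPending(v, "test2")
	fmt.Println(v) // ["test","test2"]
}
```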
@@ -30,11 +30,6 @@ func (s *Suite) TestDestroyNamespaceErrors(c *check.C) {
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
@@ -46,7 +41,7 @@ func (s *Suite) TestDestroyNamespaceErrors(c *check.C) {
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
err = h.DestroyNamespace("test")
|
||||
c.Assert(err, check.Equals, errorNamespaceNotEmpty)
|
||||
|
@@ -3,8 +3,10 @@ package headscale
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"log"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const errorAuthKeyNotFound = Error("AuthKey not found")
|
||||
@@ -31,13 +33,6 @@ func (h *Headscale) CreatePreAuthKey(namespaceName string, reusable bool, epheme
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
now := time.Now().UTC()
|
||||
kstr, err := h.generateKey()
|
||||
if err != nil {
|
||||
@@ -53,7 +48,7 @@ func (h *Headscale) CreatePreAuthKey(namespaceName string, reusable bool, epheme
|
||||
CreatedAt: &now,
|
||||
Expiration: expiration,
|
||||
}
|
||||
db.Save(&k)
|
||||
h.db.Save(&k)
|
||||
|
||||
return &k, nil
|
||||
}
|
||||
@@ -64,31 +59,41 @@ func (h *Headscale) GetPreAuthKeys(namespaceName string) (*[]PreAuthKey, error)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
keys := []PreAuthKey{}
|
||||
if err := db.Preload("Namespace").Where(&PreAuthKey{NamespaceID: n.ID}).Find(&keys).Error; err != nil {
|
||||
if err := h.db.Preload("Namespace").Where(&PreAuthKey{NamespaceID: n.ID}).Find(&keys).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &keys, nil
|
||||
}
|
||||
|
||||
// checkKeyValidity does the heavy lifting for validation of the PreAuthKey coming from a node
|
||||
// If it returns no error, the returned PreAuthKey can be used
|
||||
func (h *Headscale) checkKeyValidity(k string) (*PreAuthKey, error) {
|
||||
db, err := h.db()
|
||||
// GetPreAuthKey returns a PreAuthKey for a given key
|
||||
func (h *Headscale) GetPreAuthKey(namespace string, key string) (*PreAuthKey, error) {
|
||||
pak, err := h.checkKeyValidity(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
if pak.Namespace.Name != namespace {
|
||||
return nil, errors.New("Namespace mismatch")
|
||||
}
|
||||
|
||||
return pak, nil
|
||||
}
|
||||
|
||||
// MarkExpirePreAuthKey marks a PreAuthKey as expired
|
||||
func (h *Headscale) MarkExpirePreAuthKey(k *PreAuthKey) error {
|
||||
if err := h.db.Model(&k).Update("Expiration", time.Now()).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkKeyValidity does the heavy lifting for validation of the PreAuthKey coming from a node
|
||||
// If it returns no error, the returned PreAuthKey can be used
|
||||
func (h *Headscale) checkKeyValidity(k string) (*PreAuthKey, error) {
|
||||
pak := PreAuthKey{}
|
||||
if db.Preload("Namespace").First(&pak, "key = ?", k).RecordNotFound() {
|
||||
if result := h.db.Preload("Namespace").First(&pak, "key = ?", k); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return nil, errorAuthKeyNotFound
|
||||
}
|
||||
|
||||
@@ -101,7 +106,7 @@ func (h *Headscale) checkKeyValidity(k string) (*PreAuthKey, error) {
|
||||
}
|
||||
|
||||
machines := []Machine{}
|
||||
if err := db.Preload("AuthKey").Where(&Machine{AuthKeyID: uint(pak.ID)}).Find(&machines).Error; err != nil {
|
||||
if err := h.db.Preload("AuthKey").Where(&Machine{AuthKeyID: uint(pak.ID)}).Find(&machines).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
@@ -73,11 +73,6 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
@@ -89,7 +84,7 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
p, err := h.checkKeyValidity(pak.Key)
|
||||
c.Assert(err, check.Equals, errorAuthKeyNotReusableAlreadyUsed)
|
||||
@@ -103,11 +98,6 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
|
||||
pak, err := h.CreatePreAuthKey(n.Name, true, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
m := Machine{
|
||||
ID: 1,
|
||||
MachineKey: "foo",
|
||||
@@ -119,7 +109,7 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
|
||||
RegisterMethod: "authKey",
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
p, err := h.checkKeyValidity(pak.Key)
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -145,11 +135,6 @@ func (*Suite) TestEphemeralKey(c *check.C) {
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, true, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
now := time.Now()
|
||||
m := Machine{
|
||||
ID: 0,
|
||||
@@ -163,7 +148,7 @@ func (*Suite) TestEphemeralKey(c *check.C) {
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
_, err = h.checkKeyValidity(pak.Key)
|
||||
// Ephemeral keys are by definition reusable
|
||||
@@ -178,3 +163,20 @@ func (*Suite) TestEphemeralKey(c *check.C) {
|
||||
_, err = h.GetMachine("test7", "testest")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (*Suite) TestExpirePreauthKey(c *check.C) {
|
||||
n, err := h.CreateNamespace("test3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := h.CreatePreAuthKey(n.Name, true, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(pak.Expiration, check.IsNil)
|
||||
|
||||
err = h.MarkExpirePreAuthKey(pak)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(pak.Expiration, check.NotNil)
|
||||
|
||||
p, err := h.checkKeyValidity(pak.Key)
|
||||
c.Assert(err, check.Equals, errorAuthKeyExpired)
|
||||
c.Assert(p, check.IsNil)
|
||||
}
|
||||
|
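`checkKeyValidity` rejects keys whose `Expiration` lies in the past, and `MarkExpirePreAuthKey` simply sets `Expiration` to now. The core time comparison, sketched on a pared-down stand-in struct rather than the real `PreAuthKey` type:

```go
package main

import (
	"fmt"
	"time"
)

// key is a pared-down stand-in for PreAuthKey, carrying only the fields the
// expiry check needs. Illustrative only.
type key struct {
	Reusable   bool
	Expiration *time.Time
}

// expired reports whether a key has an expiration set and it lies in the past.
func expired(k key) bool {
	return k.Expiration != nil && k.Expiration.Before(time.Now())
}

func main() {
	past := time.Now().Add(-time.Hour)
	fmt.Println(expired(key{Expiration: &past})) // true
	fmt.Println(expired(key{}))                  // false: no expiration set
}
```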
routes.go (18 lines changed)
@@ -3,7 +3,6 @@ package headscale
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
|
||||
"gorm.io/datatypes"
|
||||
"inet.af/netaddr"
|
||||
@@ -42,29 +41,20 @@ func (h *Headscale) EnableNodeRoute(namespace string, nodeName string, routeStr
|
||||
|
||||
for _, rIP := range hi.RoutableIPs {
|
||||
if rIP == route {
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
log.Printf("Cannot open DB: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
routes, _ := json.Marshal([]string{routeStr}) // TODO: only one for the time being, so overwriting the rest
|
||||
m.EnabledRoutes = datatypes.JSON(routes)
|
||||
db.Save(&m)
|
||||
db.Close()
|
||||
h.db.Save(&m)
|
||||
|
||||
// THIS IS COMPLETELY USELESS.
|
||||
// The peers map is stored in memory in the server process.
|
||||
// Definetely not accessible from the CLI tool.
|
||||
// Definitely not accessible from the CLI tool.
|
||||
// We need RPC to the server - or some kind of 'needsUpdate' field in the DB
|
||||
peers, _ := h.getPeers(*m)
|
||||
h.pollMu.Lock()
|
||||
for _, p := range *peers {
|
||||
if pUp, ok := h.clientsPolling[uint64(p.ID)]; ok {
|
||||
pUp <- []byte{}
|
||||
if pUp, ok := h.clientsPolling.Load(uint64(p.ID)); ok {
|
||||
pUp.(chan []byte) <- []byte{}
|
||||
}
|
||||
}
|
||||
h.pollMu.Unlock()
|
||||
return &rIP, nil
|
||||
}
|
||||
}
|
||||
|
@@ -16,12 +16,6 @@ func (s *Suite) TestGetRoutes(c *check.C) {
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
db, err := h.db()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
_, err = h.GetMachine("test", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
@@ -46,7 +40,7 @@ func (s *Suite) TestGetRoutes(c *check.C) {
|
||||
AuthKeyID: uint(pak.ID),
|
||||
HostInfo: datatypes.JSON(hostinfo),
|
||||
}
|
||||
db.Save(&m)
|
||||
h.db.Save(&m)
|
||||
|
||||
r, err := h.GetNodeRoutes("test", "testmachine")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
tests/acls/acl_policy_1.hujson (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
{
|
||||
// Declare static groups of users beyond those in the identity service.
|
||||
"Groups": {
|
||||
"group:example": [
|
||||
"user1@example.com",
|
||||
"user2@example.com",
|
||||
],
|
||||
"group:example2": [
|
||||
"user1@example.com",
|
||||
"user2@example.com",
|
||||
],
|
||||
},
|
||||
// Declare hostname aliases to use in place of IP addresses or subnets.
|
||||
"Hosts": {
|
||||
"example-host-1": "100.100.100.100",
|
||||
"example-host-2": "100.100.101.100/24",
|
||||
},
|
||||
// Define who is allowed to use which tags.
|
||||
"TagOwners": {
|
||||
// Everyone in the montreal-admins or global-admins group are
|
||||
// allowed to tag servers as montreal-webserver.
|
||||
"tag:montreal-webserver": [
|
||||
"group:example",
|
||||
],
|
||||
// Only a few admins are allowed to create API servers.
|
||||
"tag:production": [
|
||||
"group:example",
|
||||
"president@example.com",
|
||||
],
|
||||
},
|
||||
// Access control lists.
|
||||
"ACLs": [
|
||||
// Engineering users, plus the president, can access port 22 (ssh)
|
||||
// and port 3389 (remote desktop protocol) on all servers, and all
|
||||
// ports on git-server or ci-server.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"group:example2",
|
||||
"192.168.1.0/24"
|
||||
],
|
||||
"Ports": [
|
||||
"*:22,3389",
|
||||
"git-server:*",
|
||||
"ci-server:*"
|
||||
],
|
||||
},
|
||||
// Allow engineer users to access any port on a device tagged with
|
||||
// tag:production.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"group:example"
|
||||
],
|
||||
"Ports": [
|
||||
"tag:production:*"
|
||||
],
|
||||
},
|
||||
// Allow servers in the my-subnet host and 192.168.1.0/24 to access hosts
|
||||
// on both networks.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"example-host-2",
|
||||
],
|
||||
"Ports": [
|
||||
"example-host-1:*",
|
||||
"192.168.1.0/24:*"
|
||||
],
|
||||
},
|
||||
// Allow every user of your network to access anything on the network.
|
||||
// Comment out this section if you want to define specific ACL
|
||||
// restrictions above.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"*"
|
||||
],
|
||||
"Ports": [
|
||||
"*:*"
|
||||
],
|
||||
},
|
||||
// All users in Montreal are allowed to access the Montreal web
|
||||
// servers.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"example-host-1"
|
||||
],
|
||||
"Ports": [
|
||||
"tag:montreal-webserver:80,443"
|
||||
],
|
||||
},
|
||||
// Montreal web servers are allowed to make outgoing connections to
|
||||
// the API servers, but only on https port 443.
|
||||
// In contrast, this doesn't grant API servers the right to initiate
|
||||
// any connections.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"tag:montreal-webserver"
|
||||
],
|
||||
"Ports": [
|
||||
"tag:api-server:443"
|
||||
],
|
||||
},
|
||||
],
|
||||
// Declare tests to check functionality of ACL rules
|
||||
"Tests": [
|
||||
{
|
||||
"User": "user1@example.com",
|
||||
"Allow": [
|
||||
"example-host-1:22",
|
||||
"example-host-2:80"
|
||||
],
|
||||
"Deny": [
|
||||
"exapmle-host-2:100"
|
||||
],
|
||||
},
|
||||
{
|
||||
"User": "user2@example.com",
|
||||
"Allow": [
|
||||
"100.60.3.4:22"
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
tests/acls/acl_policy_basic_1.hujson (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
// This ACL is a very basic example to validate the
|
||||
// expansion of hosts
|
||||
|
||||
|
||||
{
|
||||
"Hosts": {
|
||||
"host-1": "100.100.100.100",
|
||||
"subnet-1": "100.100.101.100/24",
|
||||
},
|
||||
|
||||
"ACLs": [
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"subnet-1",
|
||||
"192.168.1.0/24"
|
||||
],
|
||||
"Ports": [
|
||||
"*:22,3389",
|
||||
"host-1:*",
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
tests/acls/acl_policy_basic_groups.hujson (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
// This ACL is used to test group expansion
|
||||
|
||||
{
|
||||
"Groups": {
|
||||
"group:example": [
|
||||
"testnamespace",
|
||||
],
|
||||
},
|
||||
|
||||
"Hosts": {
|
||||
"host-1": "100.100.100.100",
|
||||
"subnet-1": "100.100.101.100/24",
|
||||
},
|
||||
|
||||
"ACLs": [
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"group:example",
|
||||
],
|
||||
"Ports": [
|
||||
"host-1:*",
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
tests/acls/acl_policy_basic_namespace_as_user.hujson (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
// This ACL is used to test namespace expansion
|
||||
|
||||
{
|
||||
"Hosts": {
|
||||
"host-1": "100.100.100.100",
|
||||
"subnet-1": "100.100.101.100/24",
|
||||
},
|
||||
|
||||
"ACLs": [
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"testnamespace",
|
||||
],
|
||||
"Ports": [
|
||||
"host-1:*",
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
tests/acls/acl_policy_basic_range.hujson (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
// This ACL is used to test the port range expansion
|
||||
|
||||
{
|
||||
"Hosts": {
|
||||
"host-1": "100.100.100.100",
|
||||
"subnet-1": "100.100.101.100/24",
|
||||
},
|
||||
|
||||
"ACLs": [
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"subnet-1",
|
||||
],
|
||||
"Ports": [
|
||||
"host-1:5400-5500",
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
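`acl_policy_basic_range.hujson` exercises expansion of a port range such as "5400-5500". A hedged sketch of such an expansion using only the standard library; headscale's actual ACL parsing code is not shown here and the helper name is illustrative:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// expandPortRange turns "5400-5500" into the list of ports it covers.
// Illustrative helper, not the project's ACL code.
func expandPortRange(spec string) ([]uint16, error) {
	parts := strings.SplitN(spec, "-", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("not a range: %q", spec)
	}
	first, err := strconv.ParseUint(strings.TrimSpace(parts[0]), 10, 16)
	if err != nil {
		return nil, err
	}
	last, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 16)
	if err != nil {
		return nil, err
	}
	if last < first {
		return nil, fmt.Errorf("range end before start: %q", spec)
	}
	ports := make([]uint16, 0, last-first+1)
	for p := first; p <= last; p++ {
		ports = append(ports, uint16(p))
	}
	return ports, nil
}

func main() {
	ports, err := expandPortRange("5400-5500")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(ports), ports[0], ports[len(ports)-1]) // 101 5400 5500
}
```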
tests/acls/acl_policy_basic_wildcards.hujson (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
// This ACL is used to test wildcards
|
||||
|
||||
{
|
||||
"Hosts": {
|
||||
"host-1": "100.100.100.100",
|
||||
"subnet-1": "100.100.101.100/24",
|
||||
},
|
||||
|
||||
"ACLs": [
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"*",
|
||||
],
|
||||
"Ports": [
|
||||
"host-1:*",
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
tests/acls/acl_policy_invalid.hujson (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
{
|
||||
// Declare static groups of users beyond those in the identity service.
|
||||
"Groups": {
|
||||
"group:example": [
|
||||
"user1@example.com",
|
||||
"user2@example.com",
|
||||
],
|
||||
},
|
||||
// Declare hostname aliases to use in place of IP addresses or subnets.
|
||||
"Hosts": {
|
||||
"example-host-1": "100.100.100.100",
|
||||
"example-host-2": "100.100.101.100/24",
|
||||
},
|
||||
// Define who is allowed to use which tags.
|
||||
"TagOwners": {
|
||||
// Everyone in the montreal-admins or global-admins group are
|
||||
// allowed to tag servers as montreal-webserver.
|
||||
"tag:montreal-webserver": [
|
||||
"group:montreal-admins",
|
||||
"group:global-admins",
|
||||
],
|
||||
// Only a few admins are allowed to create API servers.
|
||||
"tag:api-server": [
|
||||
"group:global-admins",
|
||||
"example-host-1",
|
||||
],
|
||||
},
|
||||
// Access control lists.
|
||||
"ACLs": [
|
||||
// Engineering users, plus the president, can access port 22 (ssh)
|
||||
// and port 3389 (remote desktop protocol) on all servers, and all
|
||||
// ports on git-server or ci-server.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"group:engineering",
|
||||
"president@example.com"
|
||||
],
|
||||
"Ports": [
|
||||
"*:22,3389",
|
||||
"git-server:*",
|
||||
"ci-server:*"
|
||||
],
|
||||
},
|
||||
// Allow engineer users to access any port on a device tagged with
|
||||
// tag:production.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"group:engineers"
|
||||
],
|
||||
"Ports": [
|
||||
"tag:production:*"
|
||||
],
|
||||
},
|
||||
// Allow servers in the my-subnet host and 192.168.1.0/24 to access hosts
|
||||
// on both networks.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"my-subnet",
|
||||
"192.168.1.0/24"
|
||||
],
|
||||
"Ports": [
|
||||
"my-subnet:*",
|
||||
"192.168.1.0/24:*"
|
||||
],
|
||||
},
|
||||
// Allow every user of your network to access anything on the network.
|
||||
// Comment out this section if you want to define specific ACL
|
||||
// restrictions above.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"*"
|
||||
],
|
||||
"Ports": [
|
||||
"*:*"
|
||||
],
|
||||
},
|
||||
// All users in Montreal are allowed to access the Montreal web
|
||||
// servers.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"group:montreal-users"
|
||||
],
|
||||
"Ports": [
|
||||
"tag:montreal-webserver:80,443"
|
||||
],
|
||||
},
|
||||
// Montreal web servers are allowed to make outgoing connections to
|
||||
// the API servers, but only on https port 443.
|
||||
// In contrast, this doesn't grant API servers the right to initiate
|
||||
// any connections.
|
||||
{
|
||||
"Action": "accept",
|
||||
"Users": [
|
||||
"tag:montreal-webserver"
|
||||
],
|
||||
"Ports": [
|
||||
"tag:api-server:443"
|
||||
],
|
||||
},
|
||||
],
|
||||
// Declare tests to check functionality of ACL rules
|
||||
"Tests": [
|
||||
{
|
||||
"User": "user1@example.com",
|
||||
"Allow": [
|
||||
"example-host-1:22",
|
||||
"example-host-2:80"
|
||||
],
|
||||
"Deny": [
|
||||
"exapmle-host-2:100"
|
||||
],
|
||||
},
|
||||
{
|
||||
"User": "user2@example.com",
|
||||
"Allow": [
|
||||
"100.60.3.4:22"
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
tests/acls/broken.hujson (new file, 1 line)
@@ -0,0 +1 @@
|
||||
{
|
tests/acls/invalid.hujson (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
{
|
||||
"valid_json": true,
|
||||
"but_a_policy_though": false
|
||||
}
|
utils.go (108 lines changed)
@@ -7,18 +7,13 @@ package headscale
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
mathrand "math/rand"
|
||||
|
||||
"golang.org/x/crypto/nacl/box"
|
||||
"tailscale.com/wgengine/wgcfg"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/types/wgkey"
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
@@ -26,11 +21,11 @@ type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
func decode(msg []byte, v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) error {
|
||||
func decode(msg []byte, v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) error {
|
||||
return decodeMsg(msg, v, pubKey, privKey)
|
||||
}
|
||||
|
||||
func decodeMsg(msg []byte, v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) error {
|
||||
func decodeMsg(msg []byte, v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) error {
|
||||
decrypted, err := decryptMsg(msg, pubKey, privKey)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -42,7 +37,7 @@ func decodeMsg(msg []byte, v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.Priv
|
||||
return nil
|
||||
}
|
||||
|
||||
func decryptMsg(msg []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte, error) {
|
||||
func decryptMsg(msg []byte, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, error) {
|
||||
var nonce [24]byte
|
||||
if len(msg) < len(nonce)+1 {
|
||||
return nil, fmt.Errorf("response missing nonce, len=%d", len(msg))
|
||||
@@ -58,7 +53,7 @@ func decryptMsg(msg []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byt
|
||||
return decrypted, nil
|
||||
}
|
||||
|
||||
func encode(v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte, error) {
|
||||
func encode(v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, error) {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -66,7 +61,7 @@ func encode(v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte
|
||||
return encodeMsg(b, pubKey, privKey)
|
||||
}
|
||||
|
||||
func encodeMsg(b []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte, error) {
|
||||
func encodeMsg(b []byte, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, error) {
|
||||
var nonce [24]byte
|
||||
if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
|
||||
panic(err)
|
||||
@@ -76,52 +71,71 @@ func encodeMsg(b []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte,
|
||||
return msg, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) getAvailableIP() (*net.IP, error) {
|
||||
db, err := h.db()
|
||||
func (h *Headscale) getAvailableIP() (*netaddr.IP, error) {
|
||||
ipPrefix := h.cfg.IPPrefix
|
||||
|
||||
usedIps, err := h.getUsedIPs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer db.Close()
|
||||
i := 0
|
||||
|
||||
// Get the first IP in our prefix
|
||||
ip := ipPrefix.IP()
|
||||
|
||||
for {
|
||||
ip, err := getRandomIP()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !ipPrefix.Contains(ip) {
|
||||
return nil, fmt.Errorf("could not find any suitable IP in %s", ipPrefix)
|
||||
}
|
||||
m := Machine{}
|
||||
if db.First(&m, "ip_address = ?", ip.String()).RecordNotFound() {
|
||||
return ip, nil
|
||||
|
||||
// Some OSes (including Linux) do not like IPs that end in 0 or 255, which
// traditionally denote the network and broadcast addresses. Skip them and keep
// looking when we hit one of those reserved IPs.
|
||||
ipRaw := ip.As4()
|
||||
if ipRaw[3] == 0 || ipRaw[3] == 255 {
|
||||
ip = ip.Next()
|
||||
continue
|
||||
}
|
||||
i++
|
||||
if i == 100 { // really random number
|
||||
break
|
||||
|
||||
if ip.IsZero() &&
|
||||
ip.IsLoopback() {
|
||||
|
||||
ip = ip.Next()
|
||||
continue
|
||||
}
|
||||
|
||||
if !containsIPs(usedIps, ip) {
|
||||
return &ip, nil
|
||||
}
|
||||
|
||||
ip = ip.Next()
|
||||
}
|
||||
return nil, errors.New("Could not find an available IP address in 100.64.0.0/10")
|
||||
}
|
||||
|
||||
func getRandomIP() (*net.IP, error) {
|
||||
mathrand.Seed(time.Now().Unix())
|
||||
ipo, ipnet, err := net.ParseCIDR("100.64.0.0/10")
|
||||
if err == nil {
|
||||
ip := ipo.To4()
|
||||
// fmt.Println("In Randomize IPAddr: IP ", ip, " IPNET: ", ipnet)
|
||||
// fmt.Println("Final address is ", ip)
|
||||
// fmt.Println("Broadcast address is ", ipb)
|
||||
// fmt.Println("Network address is ", ipn)
|
||||
r := mathrand.Uint32()
|
||||
ipRaw := make([]byte, 4)
|
||||
binary.LittleEndian.PutUint32(ipRaw, r)
|
||||
// ipRaw[3] = 254
|
||||
// fmt.Println("ipRaw is ", ipRaw)
|
||||
for i, v := range ipRaw {
|
||||
// fmt.Println("IP Before: ", ip[i], " v is ", v, " Mask is: ", ipnet.Mask[i])
|
||||
ip[i] = ip[i] + (v &^ ipnet.Mask[i])
|
||||
// fmt.Println("IP After: ", ip[i])
|
||||
func (h *Headscale) getUsedIPs() ([]netaddr.IP, error) {
|
||||
var addresses []string
|
||||
h.db.Model(&Machine{}).Pluck("ip_address", &addresses)
|
||||
|
||||
ips := make([]netaddr.IP, len(addresses))
|
||||
for index, addr := range addresses {
|
||||
if addr != "" {
|
||||
ip, err := netaddr.ParseIP(addr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse ip from database, %w", err)
|
||||
}
|
||||
|
||||
ips[index] = ip
|
||||
}
|
||||
// fmt.Println("FINAL IP: ", ip.String())
|
||||
return &ip, nil
|
||||
}
|
||||
|
||||
return nil, err
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
func containsIPs(ips []netaddr.IP, ip netaddr.IP) bool {
|
||||
for _, v := range ips {
|
||||
if v == ip {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
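For readers skimming the diff: the new getAvailableIP walks the configured prefix sequentially instead of drawing random addresses. Below is a minimal, self-contained sketch of that loop using inet.af/netaddr. The function name nextAvailableIP, the example prefix 10.27.0.0/23, and the in-memory used slice are illustrative only and are not part of this commit; the real code takes the prefix from the configuration and the used addresses from the database via getUsedIPs.

package main

import (
	"fmt"

	"inet.af/netaddr"
)

// nextAvailableIP walks the prefix from its first address and returns the
// first one not already in `used`, skipping addresses ending in .0 or .255
// as well as the zero and loopback addresses.
func nextAvailableIP(prefix netaddr.IPPrefix, used []netaddr.IP) (netaddr.IP, error) {
	isUsed := func(ip netaddr.IP) bool {
		for _, u := range used {
			if u == ip {
				return true
			}
		}
		return false
	}

	for ip := prefix.IP(); ; ip = ip.Next() {
		if !prefix.Contains(ip) {
			// We walked past the end of the prefix without finding a free address.
			return netaddr.IP{}, fmt.Errorf("could not find any suitable IP in %s", prefix)
		}
		// Addresses ending in 0 or 255 are traditionally network/broadcast; skip them.
		if raw := ip.As4(); raw[3] == 0 || raw[3] == 255 {
			continue
		}
		if ip.IsZero() || ip.IsLoopback() {
			continue
		}
		if !isUsed(ip) {
			return ip, nil
		}
	}
}

func main() {
	// Example values only, not taken from the commit.
	prefix := netaddr.MustParseIPPrefix("10.27.0.0/23")
	used := []netaddr.IP{netaddr.MustParseIP("10.27.0.1")}

	ip, err := nextAvailableIP(prefix, used)
	if err != nil {
		panic(err)
	}
	fmt.Println(ip) // 10.27.0.2
}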
155
utils_test.go
Normal file
@@ -0,0 +1,155 @@
package headscale

import (
	"gopkg.in/check.v1"
	"inet.af/netaddr"
)

func (s *Suite) TestGetAvailableIp(c *check.C) {
	ip, err := h.getAvailableIP()

	c.Assert(err, check.IsNil)

	expected := netaddr.MustParseIP("10.27.0.1")

	c.Assert(ip.String(), check.Equals, expected.String())
}

func (s *Suite) TestGetUsedIps(c *check.C) {
	ip, err := h.getAvailableIP()
	c.Assert(err, check.IsNil)

	n, err := h.CreateNamespace("test_ip")
	c.Assert(err, check.IsNil)

	pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine("test", "testmachine")
	c.Assert(err, check.NotNil)

	m := Machine{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Name:           "testmachine",
		NamespaceID:    n.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		AuthKeyID:      uint(pak.ID),
		IPAddress:      ip.String(),
	}
	h.db.Save(&m)

	ips, err := h.getUsedIPs()

	c.Assert(err, check.IsNil)

	expected := netaddr.MustParseIP("10.27.0.1")

	c.Assert(ips[0], check.Equals, expected)

	m1, err := h.GetMachineByID(0)
	c.Assert(err, check.IsNil)

	c.Assert(m1.IPAddress, check.Equals, expected.String())
}

func (s *Suite) TestGetMultiIp(c *check.C) {
	n, err := h.CreateNamespace("test-ip-multi")
	c.Assert(err, check.IsNil)

	for i := 1; i <= 350; i++ {
		ip, err := h.getAvailableIP()
		c.Assert(err, check.IsNil)

		pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
		c.Assert(err, check.IsNil)

		_, err = h.GetMachine("test", "testmachine")
		c.Assert(err, check.NotNil)

		m := Machine{
			ID:             uint64(i),
			MachineKey:     "foo",
			NodeKey:        "bar",
			DiscoKey:       "faa",
			Name:           "testmachine",
			NamespaceID:    n.ID,
			Registered:     true,
			RegisterMethod: "authKey",
			AuthKeyID:      uint(pak.ID),
			IPAddress:      ip.String(),
		}
		h.db.Save(&m)
	}

	ips, err := h.getUsedIPs()

	c.Assert(err, check.IsNil)

	c.Assert(len(ips), check.Equals, 350)

	c.Assert(ips[0], check.Equals, netaddr.MustParseIP("10.27.0.1"))
	c.Assert(ips[9], check.Equals, netaddr.MustParseIP("10.27.0.10"))
	c.Assert(ips[300], check.Equals, netaddr.MustParseIP("10.27.1.47"))

	// Check that we can read back the IPs
	m1, err := h.GetMachineByID(1)
	c.Assert(err, check.IsNil)
	c.Assert(m1.IPAddress, check.Equals, netaddr.MustParseIP("10.27.0.1").String())

	m50, err := h.GetMachineByID(50)
	c.Assert(err, check.IsNil)
	c.Assert(m50.IPAddress, check.Equals, netaddr.MustParseIP("10.27.0.50").String())

	expectedNextIP := netaddr.MustParseIP("10.27.1.97")
	nextIP, err := h.getAvailableIP()
	c.Assert(err, check.IsNil)

	c.Assert(nextIP.String(), check.Equals, expectedNextIP.String())

	// If we call get Available again, we should receive
	// the same IP, as it has not been reserved.
	nextIP2, err := h.getAvailableIP()
	c.Assert(err, check.IsNil)

	c.Assert(nextIP2.String(), check.Equals, expectedNextIP.String())
}

func (s *Suite) TestGetAvailableIpMachineWithoutIP(c *check.C) {
	ip, err := h.getAvailableIP()
	c.Assert(err, check.IsNil)

	expected := netaddr.MustParseIP("10.27.0.1")

	c.Assert(ip.String(), check.Equals, expected.String())

	n, err := h.CreateNamespace("test_ip")
	c.Assert(err, check.IsNil)

	pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine("test", "testmachine")
	c.Assert(err, check.NotNil)

	m := Machine{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Name:           "testmachine",
		NamespaceID:    n.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		AuthKeyID:      uint(pak.ID),
	}
	h.db.Save(&m)

	ip2, err := h.getAvailableIP()
	c.Assert(err, check.IsNil)

	c.Assert(ip2.String(), check.Equals, expected.String())
}
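The expected values in TestGetMultiIp follow directly from the skip rule: 10.27.0.1 through 10.27.0.254 yields 254 usable addresses, .255 and the following .0 are skipped, so the 301st allocation lands on 10.27.1.47 and the first free address after 350 allocations is 10.27.1.97. A tiny standalone sketch (not part of the commit; usableAt is a hypothetical helper) reproduces this arithmetic:

package main

import (
	"fmt"

	"inet.af/netaddr"
)

// usableAt returns the n-th (1-based) address handed out when walking from
// `start` and skipping every address that ends in .0 or .255.
func usableAt(start netaddr.IP, n int) netaddr.IP {
	ip := start
	seen := 0
	for {
		raw := ip.As4()
		if raw[3] != 0 && raw[3] != 255 {
			seen++
			if seen == n {
				return ip
			}
		}
		ip = ip.Next()
	}
}

func main() {
	start := netaddr.MustParseIP("10.27.0.1")
	fmt.Println(usableAt(start, 1))   // 10.27.0.1  -> ips[0]
	fmt.Println(usableAt(start, 10))  // 10.27.0.10 -> ips[9]
	fmt.Println(usableAt(start, 301)) // 10.27.1.47 -> ips[300]
	fmt.Println(usableAt(start, 351)) // 10.27.1.97 -> expectedNextIP
}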