Mirror of https://github.com/juanfont/headscale.git, synced 2025-08-17 13:47:28 +00:00

Compare commits (82 commits, newest first):

e27753e46e, 11fbef4bf0, c4e6ad1ec7, 263a3f1983, 8acaea0fbe, bd6adfaec6,
4b4a5a4b93, b098d84557, b937f9b762, 55f3e07bd4, 2780623076, 75a342f96e,
729cd54401, a023f51971, 5076eb9215, 7edd0cd14c, 7ce4738d8a, 7287e0259c,
d86de68b40, 4ba107a765, 187b016d09, 7010f5afad, 48b73fa12f, 1ecd0d7ca4,
6faaae0c5f, e4ef65be76, 39c661d408, 91a48d6a43, 123f0fa185, ba3dffecbf,
8735e5675c, 3f5e06a0f8, ba40a40b73, b3732e7fb9, 104776ee84, 01e781e546,
e77c16b55a, 987bbee1db, 74d2fe1baa, 98e63d5561, 059f13fc9d, ebd27b46af,
ca8d814918, 0aeeaac361, 28ed8a5742, f749be1490, 693bce1b10, 4f97e077db,
c883e79884, a054e2514a, c49fe26da7, d93a7f2e02, 88d7ac04bf, 1f422af1c8,
53168d54d8, b0ec945dbb, 48ef6e5a6f, 8d1adaaef3, dd8c0d1e9e, 57b79aa852,
2f883410d2, 6fa61380b2, 47b61c0cea, d739ac830f, 26024fedc7, a376b697c0,
bc2574680d, f194b41435, 350f7da55d, 36f5f78f46, 55fe5b0b41, 7d1a5c00a0,
036061664e, 5b1b40ce93, a8d9fdce3c, 700382cba4, 9698abbfd5, 5bfcf5c917,
8eb7d47072, ab61c87701, c1e6157847, 4c849539fc
@@ -2,6 +2,7 @@
 // ignoring it let us speed up the integration test
 // development
 integration_test.go
+integration_test/
 
 Dockerfile*
 docker-compose*
.github/workflows/lint.yml (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+  # The "build" workflow
+  lint:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+
+      # Install and run golangci-lint as a separate step, it's much faster this
+      # way because this action has caching. It'll get run again in `make lint`
+      # below, but it's still much faster in the end than installing
+      # golangci-lint manually in the `Run lint` step.
+      - uses: golangci/golangci-lint-action@v2
+        with:
+          args: --timeout 4m
+
+      # Setup Go
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: "1.16.3" # The Go version to download (if necessary) and use.
+
+      # Install all the dependencies
+      - name: Install dependencies
+        run: |
+          go version
+          go install golang.org/x/lint/golint@latest
+          sudo apt update
+          sudo apt install -y make
+
+      - name: Run lint
+        with:
+          args: --timeout 4m
+        run: make lint
.github/workflows/test-integration.yml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
+name: CI
+
+on: [pull_request]
+
+jobs:
+  # The "build" workflow
+  integration-test:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - uses: actions/checkout@v2
+
+      # Setup Go
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: "1.16.3"
+
+      - name: Run Integration tests
+        run: go test -tags integration -timeout 30m
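The -tags integration flag only compiles test files that declare a matching build constraint, which is what keeps this slow Docker-based suite out of the ordinary test job. A sketch of the opt-in header, using the Go 1.16-era constraint syntax (the exact header of headscale's integration_test.go is an assumption here):

// +build integration

// Compiled only for `go test -tags integration`, so the Docker-based
// suite stays out of plain `make test` runs.
package headscale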
.github/workflows/test.yml (vendored, 46 changed lines)
@@ -10,36 +10,24 @@ jobs:
 
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
 
-      # Install and run golangci-lint as a separate step, it's much faster this
-      # way because this action has caching. It'll get run again in `make lint`
-      # below, but it's still much faster in the end than installing
-      # golangci-lint manually in the `Run lint` step.
-      - uses: golangci/golangci-lint-action@v2
-        with:
-          args: --timeout 2m
-
       # Setup Go
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: '1.16.3' # The Go version to download (if necessary) and use.
+          go-version: "1.16.3" # The Go version to download (if necessary) and use.
 
       # Install all the dependencies
       - name: Install dependencies
         run: |
           go version
-          go install golang.org/x/lint/golint@latest
           sudo apt update
           sudo apt install -y make
 
-      - name: Run lint
-        run: make lint
-
       - name: Run tests
         run: make test
 
       - name: Run build
         run: make
.gitignore (vendored, 2 changed lines)
@@ -19,3 +19,5 @@ config.json
 *.key
 /db.sqlite
 *.sqlite3
+
+test_output/
@@ -10,7 +10,7 @@ COPY . /go/src/headscale
 RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
 RUN test -e /go/bin/headscale
 
-FROM ubuntu:latest
+FROM ubuntu:20.04
 
 COPY --from=build /go/bin/headscale /usr/local/bin/headscale
 ENV TZ UTC
Makefile (2 changed lines)
@@ -2,7 +2,7 @@
 version = $(shell ./scripts/version-at-commit.sh)
 
 build:
-	go build -ldflags "-s -w -X main.version=$(version)" cmd/headscale/headscale.go
+	go build -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.version=$(version)" cmd/headscale/headscale.go
 
 dev: lint test build
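The -X linker flag overwrites a package-level string variable at link time; this change points it at the cli package instead of main. A hypothetical sketch of the receiving side (file name and default value are assumptions, not taken from this diff):

// cmd/headscale/cli/version.go (illustrative)
package cli

// version is overwritten at link time by the Makefile's
//   -X github.com/juanfont/headscale/cmd/headscale/cli.version=$(version)
// flag; a plain `go build` without the flag keeps the "dev" default.
var version = "dev"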
@@ -25,14 +25,13 @@ Headscale implements this coordination server.
 - [X] JSON-formatted output
 - [X] ACLs
 - [X] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
-- [ ] Share nodes between ~~users~~ namespaces
-- [ ] DNS
+- [X] DNS (passing DNS servers to nodes)
+- [X] Share nodes between ~~users~~ namespaces
 - [ ] MagicDNS / Smart DNS
 
 
 ## Roadmap 🤷
 
 We are now focusing on adding integration tests with the official clients.
 
 Suggestions/PRs welcomed!
api.go (297 changed lines)
@@ -13,9 +13,7 @@ import (
 
 	"github.com/gin-gonic/gin"
 	"github.com/klauspost/compress/zstd"
-	"gorm.io/datatypes"
 	"gorm.io/gorm"
-	"inet.af/netaddr"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/wgkey"
 )
@@ -35,8 +33,6 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
 		return
 	}
 
-	// spew.Dump(c.Params)
-
 	c.Data(http.StatusOK, "text/html; charset=utf-8", []byte(fmt.Sprintf(`
 <html>
 <body>
@@ -82,14 +78,16 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
 		return
 	}
 
+	now := time.Now().UTC()
 	var m Machine
 	if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
 		log.Info().Str("machine", req.Hostinfo.Hostname).Msg("New machine")
 		m = Machine{
-			Expiry:     &req.Expiry,
-			MachineKey: mKey.HexString(),
-			Name:       req.Hostinfo.Hostname,
-			NodeKey:    wgkey.Key(req.NodeKey).HexString(),
+			Expiry:               &req.Expiry,
+			MachineKey:           mKey.HexString(),
+			Name:                 req.Hostinfo.Hostname,
+			NodeKey:              wgkey.Key(req.NodeKey).HexString(),
+			LastSuccessfulUpdate: &now,
 		}
 		if err := h.db.Create(&m).Error; err != nil {
 			log.Error().
@@ -215,272 +213,12 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
 	c.Data(200, "application/json; charset=utf-8", respBody)
 }
 
-// PollNetMapHandler takes care of /machine/:id/map
-//
-// This is the busiest endpoint, as it keeps the HTTP long poll that updates
-// the clients when something in the network changes.
-//
-// The clients POST stuff like HostInfo and their Endpoints here, but
-// only after their first request (marked with the ReadOnly field).
-//
-// At this moment the updates are sent in a quite horrendous way, but they kinda work.
-func (h *Headscale) PollNetMapHandler(c *gin.Context) {
-	log.Trace().
-		Str("handler", "PollNetMap").
-		Str("id", c.Param("id")).
-		Msg("PollNetMapHandler called")
-	body, _ := io.ReadAll(c.Request.Body)
-	mKeyStr := c.Param("id")
-	mKey, err := wgkey.ParseHex(mKeyStr)
-	if err != nil {
-		log.Error().
-			Str("handler", "PollNetMap").
-			Err(err).
-			Msg("Cannot parse client key")
-		c.String(http.StatusBadRequest, "")
-		return
-	}
-	req := tailcfg.MapRequest{}
-	err = decode(body, &req, &mKey, h.privateKey)
-	if err != nil {
-		log.Error().
-			Str("handler", "PollNetMap").
-			Err(err).
-			Msg("Cannot decode message")
-		c.String(http.StatusBadRequest, "")
-		return
-	}
-
-	var m Machine
-	if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
-		log.Warn().
-			Str("handler", "PollNetMap").
-			Msgf("Ignoring request, cannot find machine with key %s", mKey.HexString())
-		c.String(http.StatusUnauthorized, "")
-		return
-	}
-	log.Trace().
-		Str("handler", "PollNetMap").
-		Str("id", c.Param("id")).
-		Str("machine", m.Name).
-		Msg("Found machine in database")
-
-	hostinfo, _ := json.Marshal(req.Hostinfo)
-	m.Name = req.Hostinfo.Hostname
-	m.HostInfo = datatypes.JSON(hostinfo)
-	m.DiscoKey = wgkey.Key(req.DiscoKey).HexString()
-	now := time.Now().UTC()
-
-	// From Tailscale client:
-	//
-	// ReadOnly is whether the client just wants to fetch the MapResponse,
-	// without updating their Endpoints. The Endpoints field will be ignored and
-	// LastSeen will not be updated and peers will not be notified of changes.
-	//
-	// The intended use is for clients to discover the DERP map at start-up
-	// before their first real endpoint update.
-	if !req.ReadOnly {
-		endpoints, _ := json.Marshal(req.Endpoints)
-		m.Endpoints = datatypes.JSON(endpoints)
-		m.LastSeen = &now
-	}
-	h.db.Save(&m)
-
-	update := make(chan []byte, 1)
-
-	pollData := make(chan []byte, 1)
-	defer close(pollData)
-
-	cancelKeepAlive := make(chan []byte, 1)
-	defer close(cancelKeepAlive)
-
-	log.Trace().
-		Str("handler", "PollNetMap").
-		Str("id", c.Param("id")).
-		Str("machine", m.Name).
-		Msg("Storing update channel")
-	h.clientsPolling.Store(m.ID, update)
-
-	data, err := h.getMapResponse(mKey, req, m)
-	if err != nil {
-		log.Error().
-			Str("handler", "PollNetMap").
-			Str("id", c.Param("id")).
-			Str("machine", m.Name).
-			Err(err).
-			Msg("Failed to get Map response")
-		c.String(http.StatusInternalServerError, ":(")
-		return
-	}
-
-	// We update our peers if the client is not sending ReadOnly in the MapRequest
-	// so we don't distribute its initial request (it comes with
-	// empty endpoints to peers)
-
-	// Details on the protocol can be found in https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L696
-	log.Debug().
-		Str("handler", "PollNetMap").
-		Str("id", c.Param("id")).
-		Str("machine", m.Name).
-		Bool("readOnly", req.ReadOnly).
-		Bool("omitPeers", req.OmitPeers).
-		Bool("stream", req.Stream).
-		Msg("Client map request processed")
-
-	if req.ReadOnly {
-		log.Info().
-			Str("handler", "PollNetMap").
-			Str("machine", m.Name).
-			Msg("Client is starting up. Asking for DERP map")
-		c.Data(200, "application/json; charset=utf-8", *data)
-		return
-	}
-	if req.OmitPeers && !req.Stream {
-		log.Info().
-			Str("handler", "PollNetMap").
-			Str("machine", m.Name).
-			Msg("Client sent endpoint update and is ok with a response without peer list")
-		c.Data(200, "application/json; charset=utf-8", *data)
-		return
-	} else if req.OmitPeers && req.Stream {
-		log.Warn().
-			Str("handler", "PollNetMap").
-			Str("machine", m.Name).
-			Msg("Ignoring request, don't know how to handle it")
-		c.String(http.StatusBadRequest, "")
-		return
-	}
-
-	log.Info().
-		Str("handler", "PollNetMap").
-		Str("machine", m.Name).
-		Msg("Client is ready to access the tailnet")
-	log.Info().
-		Str("handler", "PollNetMap").
-		Str("machine", m.Name).
-		Msg("Sending initial map")
-	pollData <- *data
-
-	log.Info().
-		Str("handler", "PollNetMap").
-		Str("machine", m.Name).
-		Msg("Notifying peers")
-	peers, _ := h.getPeers(m)
-	for _, p := range *peers {
-		pUp, ok := h.clientsPolling.Load(uint64(p.ID))
-		if ok {
-			log.Info().
-				Str("handler", "PollNetMap").
-				Str("machine", m.Name).
-				Str("peer", m.Name).
-				Str("address", p.Addresses[0].String()).
-				Msgf("Notifying peer %s (%s)", p.Name, p.Addresses[0])
-			pUp.(chan []byte) <- []byte{}
-		} else {
-			log.Info().
-				Str("handler", "PollNetMap").
-				Str("machine", m.Name).
-				Str("peer", m.Name).
-				Msgf("Peer %s does not appear to be polling", p.Name)
-		}
-	}
-
-	go h.keepAlive(cancelKeepAlive, pollData, mKey, req, m)
-
-	c.Stream(func(w io.Writer) bool {
-		select {
-		case data := <-pollData:
-			log.Trace().
-				Str("handler", "PollNetMap").
-				Str("machine", m.Name).
-				Int("bytes", len(data)).
-				Msg("Sending data")
-			_, err := w.Write(data)
-			if err != nil {
-				log.Error().
-					Str("handler", "PollNetMap").
-					Str("machine", m.Name).
-					Err(err).
-					Msg("Cannot write data")
-			}
-			now := time.Now().UTC()
-			m.LastSeen = &now
-			h.db.Save(&m)
-			return true
-
-		case <-update:
-			log.Debug().
-				Str("handler", "PollNetMap").
-				Str("machine", m.Name).
-				Msg("Received a request for update")
-			data, err := h.getMapResponse(mKey, req, m)
-			if err != nil {
-				log.Error().
-					Str("handler", "PollNetMap").
-					Str("machine", m.Name).
-					Err(err).
-					Msg("Could not get the map update")
-			}
-			_, err = w.Write(*data)
-			if err != nil {
-				log.Error().
-					Str("handler", "PollNetMap").
-					Str("machine", m.Name).
-					Err(err).
-					Msg("Could not write the map response")
-			}
-			return true
-
-		case <-c.Request.Context().Done():
-			log.Info().
-				Str("handler", "PollNetMap").
-				Str("machine", m.Name).
-				Msg("The client has closed the connection")
-			now := time.Now().UTC()
-			m.LastSeen = &now
-			h.db.Save(&m)
-			cancelKeepAlive <- []byte{}
-			h.clientsPolling.Delete(m.ID)
-			close(update)
-			return false
-
-		}
-	})
-}
-
-func (h *Headscale) keepAlive(cancel chan []byte, pollData chan []byte, mKey wgkey.Key, req tailcfg.MapRequest, m Machine) {
-	for {
-		select {
-		case <-cancel:
-			return
-
-		default:
-			data, err := h.getMapKeepAliveResponse(mKey, req, m)
-			if err != nil {
-				log.Error().
-					Str("func", "keepAlive").
-					Err(err).
-					Msg("Error generating the keep alive msg")
-				return
-			}
-
-			log.Debug().
-				Str("func", "keepAlive").
-				Str("machine", m.Name).
-				Msg("Sending keepalive")
-			pollData <- *data
-
-			time.Sleep(60 * time.Second)
-		}
-	}
-}
-
 func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
 	log.Trace().
 		Str("func", "getMapResponse").
 		Str("machine", req.Hostinfo.Hostname).
 		Msg("Creating Map response")
-	node, err := m.toNode()
+	node, err := m.toNode(true)
 	if err != nil {
 		log.Error().
 			Str("func", "getMapResponse").
@@ -504,20 +242,30 @@ func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Mac
 	}
 
 	resp := tailcfg.MapResponse{
-		KeepAlive: false,
-		Node:      node,
-		Peers:     *peers,
-		DNS:       []netaddr.IP{},
+		KeepAlive: false,
+		Node:      node,
+		Peers:     *peers,
+		//TODO(kradalby): As per tailscale docs, if DNSConfig is nil,
+		// it means its not updated, maybe we can have some logic
+		// to check and only pass updates when its updates.
+		// This is probably more relevant if we try to implement
+		// "MagicDNS"
+		DNSConfig:    h.cfg.DNSConfig,
 		SearchPaths:  []string{},
 		Domain:       "headscale.net",
 		PacketFilter: *h.aclRules,
 		DERPMap:      h.cfg.DerpMap,
 		UserProfiles: []tailcfg.UserProfile{profile},
 	}
 	log.Trace().
 		Str("func", "getMapResponse").
 		Str("machine", req.Hostinfo.Hostname).
 		Msgf("Generated map response: %s", tailMapResponseToString(resp))
 
 	var respBody []byte
 	if req.Compress == "zstd" {
 		src, _ := json.Marshal(resp)
 
 		encoder, _ := zstd.NewWriter(nil)
 		srcCompressed := encoder.EncodeAll(src, nil)
 		respBody, err = encodeMsg(srcCompressed, &mKey, h.privateKey)
@@ -530,7 +278,6 @@ func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Mac
 		return nil, err
 		}
 	}
-	// spew.Dump(resp)
 	// declare the incoming size on the first 4 bytes
 	data := make([]byte, 4)
 	binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
@@ -607,7 +354,7 @@ func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgkey.Key,
 		Str("func", "handleAuthKey").
 		Str("machine", m.Name).
 		Str("ip", ip.String()).
-		Msgf("Assining %s to %s", ip, m.Name)
+		Msgf("Assigning %s to %s", ip, m.Name)
 
 	m.AuthKeyID = uint(pak.ID)
 	m.IPAddress = ip.String()
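The removed PollNetMapHandler encodes the Tailscale MapRequest long-poll protocol. As a self-contained restatement of its ReadOnly/OmitPeers/Stream branching for reference while reading the removal above (a summary sketch, not headscale code):

package main

import "fmt"

// describeMapRequest restates the decision table from the handler above.
func describeMapRequest(readOnly, omitPeers, stream bool) string {
	switch {
	case readOnly:
		return "startup: single response carrying the DERP map"
	case omitPeers && !stream:
		return "endpoint update: single response without a peer list"
	case omitPeers && stream:
		return "unsupported combination: rejected with 400"
	default:
		return "long poll: initial full map, then streamed updates"
	}
}

func main() {
	fmt.Println(describeMapRequest(true, false, false))
}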
app.go (51 changed lines)
@@ -43,6 +43,8 @@ type Config struct {
 
 	TLSCertPath string
 	TLSKeyPath  string
+
+	DNSConfig *tailcfg.DNSConfig
 }
 
 // Headscale represents the base app of the service
@@ -58,7 +60,10 @@ type Headscale struct {
 	aclPolicy *ACLPolicy
 	aclRules  *[]tailcfg.FilterRule
 
-	clientsPolling sync.Map
+	clientsUpdateChannels     sync.Map
+	clientsUpdateChannelMutex sync.Mutex
+
+	lastStateChange sync.Map
 }
 
 // NewHeadscale returns the Headscale app
@@ -107,9 +112,9 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
 	http.Redirect(w, req, target, http.StatusFound)
 }
 
-// ExpireEphemeralNodes deletes ephemeral machine records that have not been
+// expireEphemeralNodes deletes ephemeral machine records that have not been
 // seen for longer than h.cfg.EphemeralNodeInactivityTimeout
-func (h *Headscale) ExpireEphemeralNodes(milliSeconds int64) {
+func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
 	ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
 	for range ticker.C {
 		h.expireEphemeralNodesWorker()
@@ -135,6 +140,7 @@ func (h *Headscale) expireEphemeralNodesWorker() {
 			if err != nil {
 				log.Error().Err(err).Str("machine", m.Name).Msg("🤮 Cannot delete ephemeral machine from the database")
 			}
+			h.notifyChangesToPeers(&m)
 		}
 	}
 }
@@ -164,7 +170,17 @@ func (h *Headscale) Serve() error {
 	r.POST("/machine/:id", h.RegistrationHandler)
 	var err error
 
+	timeout := 30 * time.Second
+
+	go h.watchForKVUpdates(5000)
+	go h.expireEphemeralNodes(5000)
+
+	s := &http.Server{
+		Addr:         h.cfg.Addr,
+		Handler:      r,
+		ReadTimeout:  timeout,
+		WriteTimeout: timeout,
+	}
+
 	if h.cfg.TLSLetsEncryptHostname != "" {
 		if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
@@ -177,9 +193,11 @@ func (h *Headscale) Serve() error {
 			Cache:      autocert.DirCache(h.cfg.TLSLetsEncryptCacheDir),
 		}
 		s := &http.Server{
-			Addr:      h.cfg.Addr,
-			TLSConfig: m.TLSConfig(),
-			Handler:   r,
+			Addr:         h.cfg.Addr,
+			TLSConfig:    m.TLSConfig(),
+			Handler:      r,
+			ReadTimeout:  timeout,
+			WriteTimeout: timeout,
 		}
 		if h.cfg.TLSLetsEncryptChallengeType == "TLS-ALPN-01" {
 			// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
@@ -204,12 +222,29 @@ func (h *Headscale) Serve() error {
 		if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
 			log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
 		}
-		err = r.Run(h.cfg.Addr)
+		err = s.ListenAndServe()
 	} else {
 		if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
 			log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
 		}
-		err = r.RunTLS(h.cfg.Addr, h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
+		err = s.ListenAndServeTLS(h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
 	}
 	return err
 }
+
+func (h *Headscale) setLastStateChangeToNow(namespace string) {
+	now := time.Now().UTC()
+	h.lastStateChange.Store(namespace, now)
+}
+
+func (h *Headscale) getLastStateChange(namespace string) time.Time {
+	if wrapped, ok := h.lastStateChange.Load(namespace); ok {
+		lastChange, _ := wrapped.(time.Time)
+		return lastChange
+
+	}
+
+	now := time.Now().UTC()
+	h.lastStateChange.Store(namespace, now)
+	return now
+}
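The new lastStateChange field treats sync.Map as a per-namespace timestamp registry: setLastStateChangeToNow bumps the entry on every change and getLastStateChange seeds it on first read. A standalone sketch of the same pattern (names here are illustrative, not headscale's API):

package main

import (
	"fmt"
	"sync"
	"time"
)

type stateTracker struct{ m sync.Map }

// touch records a state change, as setLastStateChangeToNow does.
func (t *stateTracker) touch(ns string) { t.m.Store(ns, time.Now().UTC()) }

// last returns the stored timestamp, seeding it on first use
// like getLastStateChange.
func (t *stateTracker) last(ns string) time.Time {
	if v, ok := t.m.Load(ns); ok {
		return v.(time.Time)
	}
	now := time.Now().UTC()
	t.m.Store(ns, now)
	return now
}

func main() {
	var t stateTracker
	first := t.last("default") // first read seeds the entry
	t.touch("default")         // a state change bumps the timestamp
	fmt.Println(!t.last("default").Before(first))
}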
@@ -3,8 +3,10 @@ package cli
 import (
 	"fmt"
 	"log"
+	"strconv"
 	"strings"
 
+	"github.com/pterm/pterm"
 	"github.com/spf13/cobra"
 )
@@ -94,9 +96,14 @@ var listNamespacesCmd = &cobra.Command{
 			fmt.Println(err)
 			return
 		}
-		fmt.Printf("ID\tName\n")
+
+		d := pterm.TableData{{"ID", "Name", "Created"}}
 		for _, n := range *namespaces {
-			fmt.Printf("%d\t%s\n", n.ID, n.Name)
+			d = append(d, []string{strconv.FormatUint(uint64(n.ID), 10), n.Name, n.CreatedAt.Format("2006-01-02 15:04:05")})
+		}
+		err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
+		if err != nil {
+			log.Fatal(err)
 		}
 	},
 }
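This pterm rendering pattern (header row first, then WithHasHeader plus Render) repeats in the node, pre-auth-key, and route listings below. A minimal standalone example of the same calls:

package main

import (
	"log"

	"github.com/pterm/pterm"
)

func main() {
	// The first row is the header; WithHasHeader tells pterm to style it.
	d := pterm.TableData{
		{"ID", "Name", "Created"},
		{"1", "default", "2021-08-12 10:00:00"},
	}
	if err := pterm.DefaultTable.WithHasHeader().WithData(d).Render(); err != nil {
		log.Fatal(err)
	}
}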
@@ -8,7 +8,11 @@ import (
 	"time"
 
 	survey "github.com/AlecAivazis/survey/v2"
+	"github.com/juanfont/headscale"
+	"github.com/pterm/pterm"
 	"github.com/spf13/cobra"
+	"tailscale.com/tailcfg"
+	"tailscale.com/types/wgkey"
 )
 
 func init() {
@@ -21,6 +25,7 @@ func init() {
 	nodeCmd.AddCommand(listNodesCmd)
 	nodeCmd.AddCommand(registerNodeCmd)
 	nodeCmd.AddCommand(deleteNodeCmd)
+	nodeCmd.AddCommand(shareMachineCmd)
 }
 
 var nodeCmd = &cobra.Command{
@@ -33,7 +38,7 @@ var registerNodeCmd = &cobra.Command{
 	Short: "Registers a machine to your network",
 	Args: func(cmd *cobra.Command, args []string) error {
 		if len(args) < 1 {
-			return fmt.Errorf("Missing parameters")
+			return fmt.Errorf("missing parameters")
 		}
 		return nil
 	},
@@ -75,9 +80,26 @@ var listNodesCmd = &cobra.Command{
 		if err != nil {
 			log.Fatalf("Error initializing: %s", err)
 		}
+
+		namespace, err := h.GetNamespace(n)
+		if err != nil {
+			log.Fatalf("Error fetching namespace: %s", err)
+		}
+
 		machines, err := h.ListMachinesInNamespace(n)
 		if err != nil {
 			log.Fatalf("Error fetching machines: %s", err)
 		}
+
+		sharedMachines, err := h.ListSharedMachinesInNamespace(n)
+		if err != nil {
+			log.Fatalf("Error fetching shared machines: %s", err)
+		}
+
+		allMachines := append(*machines, *sharedMachines...)
+
 		if strings.HasPrefix(o, "json") {
-			JsonOutput(machines, err, o)
+			JsonOutput(allMachines, err, o)
 			return
 		}
@@ -85,19 +107,15 @@ var listNodesCmd = &cobra.Command{
 			log.Fatalf("Error getting nodes: %s", err)
 		}
 
-		fmt.Printf("ID\tname\t\tlast seen\t\tephemeral\n")
-		for _, m := range *machines {
-			var ephemeral bool
-			if m.AuthKey != nil && m.AuthKey.Ephemeral {
-				ephemeral = true
-			}
-			var lastSeen time.Time
-			if m.LastSeen != nil {
-				lastSeen = *m.LastSeen
-			}
-			fmt.Printf("%d\t%s\t%s\t%t\n", m.ID, m.Name, lastSeen.Format("2006-01-02 15:04:05"), ephemeral)
+		d, err := nodesToPtables(*namespace, allMachines)
+		if err != nil {
+			log.Fatalf("Error converting to table: %s", err)
+		}
+
+		err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
+		if err != nil {
+			log.Fatal(err)
 		}
 	},
 }
@@ -106,7 +124,7 @@ var deleteNodeCmd = &cobra.Command{
 	Short: "Delete a node",
 	Args: func(cmd *cobra.Command, args []string) error {
 		if len(args) < 1 {
-			return fmt.Errorf("Missing parameters")
+			return fmt.Errorf("missing parameters")
 		}
 		return nil
 	},
@@ -144,3 +162,95 @@ var deleteNodeCmd = &cobra.Command{
 		}
 	},
 }
+
+var shareMachineCmd = &cobra.Command{
+	Use:   "share ID namespace",
+	Short: "Shares a node from the current namespace to the specified one",
+	Args: func(cmd *cobra.Command, args []string) error {
+		if len(args) < 2 {
+			return fmt.Errorf("missing parameters")
+		}
+		return nil
+	},
+	Run: func(cmd *cobra.Command, args []string) {
+		namespace, err := cmd.Flags().GetString("namespace")
+		if err != nil {
+			log.Fatalf("Error getting namespace: %s", err)
+		}
+		output, _ := cmd.Flags().GetString("output")
+
+		h, err := getHeadscaleApp()
+		if err != nil {
+			log.Fatalf("Error initializing: %s", err)
+		}
+
+		_, err = h.GetNamespace(namespace)
+		if err != nil {
+			log.Fatalf("Error fetching origin namespace: %s", err)
+		}
+
+		destinationNamespace, err := h.GetNamespace(args[1])
+		if err != nil {
+			log.Fatalf("Error fetching destination namespace: %s", err)
+		}
+
+		id, err := strconv.Atoi(args[0])
+		if err != nil {
+			log.Fatalf("Error converting ID to integer: %s", err)
+		}
+		machine, err := h.GetMachineByID(uint64(id))
+		if err != nil {
+			log.Fatalf("Error getting node: %s", err)
+		}
+
+		err = h.AddSharedMachineToNamespace(machine, destinationNamespace)
+		if strings.HasPrefix(output, "json") {
+			JsonOutput(map[string]string{"Result": "Node shared"}, err, output)
+			return
+		}
+		if err != nil {
+			fmt.Printf("Error sharing node: %s\n", err)
+			return
+		}
+
+		fmt.Println("Node shared!")
+	},
+}
+
+func nodesToPtables(currentNamespace headscale.Namespace, machines []headscale.Machine) (pterm.TableData, error) {
+	d := pterm.TableData{{"ID", "Name", "NodeKey", "Namespace", "IP address", "Ephemeral", "Last seen", "Online"}}
+
+	for _, machine := range machines {
+		var ephemeral bool
+		if machine.AuthKey != nil && machine.AuthKey.Ephemeral {
+			ephemeral = true
+		}
+		var lastSeen time.Time
+		var lastSeenTime string
+		if machine.LastSeen != nil {
+			lastSeen = *machine.LastSeen
+			lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
+		}
+		nKey, err := wgkey.ParseHex(machine.NodeKey)
+		if err != nil {
+			return nil, err
+		}
+		nodeKey := tailcfg.NodeKey(nKey)
+
+		var online string
+		if lastSeen.After(time.Now().Add(-5 * time.Minute)) { // TODO: Find a better way to reliably show if online
+			online = pterm.LightGreen("true")
+		} else {
+			online = pterm.LightRed("false")
+		}
+
+		var namespace string
+		if currentNamespace.ID == machine.NamespaceID {
+			namespace = pterm.LightMagenta(machine.Namespace.Name)
+		} else {
+			namespace = pterm.LightYellow(machine.Namespace.Name)
+		}
+		d = append(d, []string{strconv.FormatUint(machine.ID, 10), machine.Name, nodeKey.ShortString(), namespace, machine.IPAddress, strconv.FormatBool(ephemeral), lastSeenTime, online})
+	}
+	return d, nil
+}
@@ -3,10 +3,12 @@ package cli
 import (
 	"fmt"
 	"log"
+	"strconv"
 	"strings"
 	"time"
 
 	"github.com/hako/durafmt"
+	"github.com/pterm/pterm"
 	"github.com/spf13/cobra"
 )
@@ -54,6 +56,8 @@ var listPreAuthKeys = &cobra.Command{
 			fmt.Printf("Error getting the list of keys: %s\n", err)
 			return
 		}
+
+		d := pterm.TableData{{"ID", "Key", "Reusable", "Ephemeral", "Expiration", "Created"}}
 		for _, k := range *keys {
 			expiration := "-"
 			if k.Expiration != nil {
@@ -67,15 +71,19 @@ var listPreAuthKeys = &cobra.Command{
 			reusable = fmt.Sprintf("%v", k.Reusable)
 		}
 
-		fmt.Printf(
-			"key: %s, namespace: %s, reusable: %s, ephemeral: %v, expiration: %s, created_at: %s\n",
+		d = append(d, []string{
+			strconv.FormatUint(k.ID, 10),
 			k.Key,
-			k.Namespace.Name,
 			reusable,
-			k.Ephemeral,
+			strconv.FormatBool(k.Ephemeral),
 			expiration,
 			k.CreatedAt.Format("2006-01-02 15:04:05"),
-		)
+		})
 
 		}
+		err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
+		if err != nil {
+			log.Fatal(err)
+		}
 	},
 }
@@ -5,6 +5,7 @@ import (
 	"log"
 	"strings"
 
+	"github.com/pterm/pterm"
 	"github.com/spf13/cobra"
 )
@@ -15,6 +16,9 @@ func init() {
 	if err != nil {
 		log.Fatalf(err.Error())
 	}
+
+	enableRouteCmd.Flags().BoolP("all", "a", false, "Enable all routes advertised by the node")
+
 	routesCmd.AddCommand(listRoutesCmd)
 	routesCmd.AddCommand(enableRouteCmd)
 }
@@ -44,19 +48,25 @@ var listRoutesCmd = &cobra.Command{
 		if err != nil {
 			log.Fatalf("Error initializing: %s", err)
 		}
-		routes, err := h.GetNodeRoutes(n, args[0])
 
-		if strings.HasPrefix(o, "json") {
-			JsonOutput(routes, err, o)
-			return
-		}
-
+		availableRoutes, err := h.GetAdvertisedNodeRoutes(n, args[0])
 		if err != nil {
 			fmt.Println(err)
 			return
 		}
 
-		fmt.Println(routes)
+		if strings.HasPrefix(o, "json") {
+			// TODO: Add enable/disabled information to this interface
+			JsonOutput(availableRoutes, err, o)
+			return
+		}
+
+		d := h.RoutesToPtables(n, args[0], *availableRoutes)
+
+		err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
+		if err != nil {
+			log.Fatal(err)
+		}
 	},
 }
@@ -64,32 +74,74 @@ var enableRouteCmd = &cobra.Command{
 	Use:   "enable node-name route",
 	Short: "Allows exposing a route declared by this node to the rest of the nodes",
 	Args: func(cmd *cobra.Command, args []string) error {
-		if len(args) < 2 {
-			return fmt.Errorf("Missing parameters")
+		all, err := cmd.Flags().GetBool("all")
+		if err != nil {
+			log.Fatalf("Error getting namespace: %s", err)
 		}
+
+		if all {
+			if len(args) < 1 {
+				return fmt.Errorf("Missing parameters")
+			}
+			return nil
+		} else {
+			if len(args) < 2 {
+				return fmt.Errorf("Missing parameters")
+			}
+			return nil
+		}
 		return nil
 	},
 	Run: func(cmd *cobra.Command, args []string) {
 		n, err := cmd.Flags().GetString("namespace")
 		if err != nil {
 			log.Fatalf("Error getting namespace: %s", err)
 		}
 
 		o, _ := cmd.Flags().GetString("output")
 
+		all, err := cmd.Flags().GetBool("all")
+		if err != nil {
+			log.Fatalf("Error getting namespace: %s", err)
+		}
+
 		h, err := getHeadscaleApp()
 		if err != nil {
 			log.Fatalf("Error initializing: %s", err)
 		}
-		route, err := h.EnableNodeRoute(n, args[0], args[1])
-		if strings.HasPrefix(o, "json") {
-			JsonOutput(route, err, o)
-			return
-		}
 
-		if err != nil {
-			fmt.Println(err)
-			return
+		if all {
+			availableRoutes, err := h.GetAdvertisedNodeRoutes(n, args[0])
+			if err != nil {
+				fmt.Println(err)
+				return
+			}
+
+			for _, availableRoute := range *availableRoutes {
+				err = h.EnableNodeRoute(n, args[0], availableRoute.String())
+				if err != nil {
+					fmt.Println(err)
+					return
+				}
+
+				if strings.HasPrefix(o, "json") {
+					JsonOutput(availableRoute, err, o)
+				} else {
+					fmt.Printf("Enabled route %s\n", availableRoute)
+				}
+			}
+		} else {
+			err = h.EnableNodeRoute(n, args[0], args[1])
+
+			if strings.HasPrefix(o, "json") {
+				JsonOutput(args[1], err, o)
+				return
+			}
+
+			if err != nil {
+				fmt.Println(err)
+				return
+			}
+			fmt.Printf("Enabled route %s\n", args[1])
 		}
-		fmt.Printf("Enabled route %s\n", route)
 	},
 }
@@ -21,7 +21,7 @@ var serveCmd = &cobra.Command{
 		if err != nil {
 			log.Fatalf("Error initializing: %s", err)
 		}
-		go h.ExpireEphemeralNodes(5000)
+
 		err = h.Serve()
 		if err != nil {
 			log.Fatalf("Error initializing: %s", err)
@@ -39,7 +39,9 @@ func LoadConfig(path string) error {
 
 	viper.SetDefault("ip_prefix", "100.64.0.0/10")
 
-	viper.SetDefault("log_level", "debug")
+	viper.SetDefault("log_level", "info")
+
+	viper.SetDefault("dns_config", nil)
 
 	err := viper.ReadInConfig()
 	if err != nil {
@@ -70,6 +72,45 @@ func LoadConfig(path string) error {
 	} else {
 		return nil
 	}
 
 }
 
+func GetDNSConfig() *tailcfg.DNSConfig {
+	if viper.IsSet("dns_config") {
+		dnsConfig := &tailcfg.DNSConfig{}
+
+		if viper.IsSet("dns_config.nameservers") {
+			nameserversStr := viper.GetStringSlice("dns_config.nameservers")
+
+			nameservers := make([]netaddr.IP, len(nameserversStr))
+			resolvers := make([]tailcfg.DNSResolver, len(nameserversStr))
+
+			for index, nameserverStr := range nameserversStr {
+				nameserver, err := netaddr.ParseIP(nameserverStr)
+				if err != nil {
+					log.Error().
+						Str("func", "getDNSConfig").
+						Err(err).
+						Msgf("Could not parse nameserver IP: %s", nameserverStr)
+				}
+
+				nameservers[index] = nameserver
+				resolvers[index] = tailcfg.DNSResolver{
+					Addr: nameserver.String(),
+				}
+			}
+
+			dnsConfig.Nameservers = nameservers
+			dnsConfig.Resolvers = resolvers
+		}
+		if viper.IsSet("dns_config.domains") {
+			dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
+		}
+
+		return dnsConfig
+	}
+
+	return nil
+}
+
 func absPath(path string) string {
@@ -126,6 +167,8 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
 
 		TLSCertPath: absPath(viper.GetString("tls_cert_path")),
 		TLSKeyPath:  absPath(viper.GetString("tls_key_path")),
+
+		DNSConfig: GetDNSConfig(),
 	}
 
 	h, err := headscale.NewHeadscale(cfg)
@@ -58,7 +58,7 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
 	c.Assert(viper.GetString("db_port"), check.Equals, "5432")
 	c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
 	c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
 	c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
+	c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
 }
 
 func (*Suite) TestSqliteConfigLoading(c *check.C) {
@@ -92,6 +92,37 @@ func (*Suite) TestSqliteConfigLoading(c *check.C) {
 	c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
 	c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
 	c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
+	c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
 }
 
+func (*Suite) TestDNSConfigLoading(c *check.C) {
+	tmpDir, err := ioutil.TempDir("", "headscale")
+	if err != nil {
+		c.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	path, err := os.Getwd()
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// Symlink the example config file
+	err = os.Symlink(filepath.Clean(path+"/../../config.json.sqlite.example"), filepath.Join(tmpDir, "config.json"))
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	// Load example config, it should load without validation errors
+	err = cli.LoadConfig(tmpDir)
+	c.Assert(err, check.IsNil)
+
+	dnsConfig := cli.GetDNSConfig()
+	fmt.Println(dnsConfig)
+
+	c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
+
+	c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
+}
+
 func writeConfig(c *check.C, tmpDir string, configYaml []byte) {
@@ -16,5 +16,10 @@
     "tls_letsencrypt_challenge_type": "HTTP-01",
     "tls_cert_path": "",
     "tls_key_path": "",
-    "acl_policy_path": ""
+    "acl_policy_path": "",
+    "dns_config": {
+        "nameservers": [
+            "1.1.1.1"
+        ]
+    }
 }
@@ -12,5 +12,10 @@
     "tls_letsencrypt_challenge_type": "HTTP-01",
     "tls_cert_path": "",
     "tls_key_path": "",
-    "acl_policy_path": ""
+    "acl_policy_path": "",
+    "dns_config": {
+        "nameservers": [
+            "1.1.1.1"
+        ]
+    }
 }
db.go (5 changed lines)
@@ -44,6 +44,11 @@ func (h *Headscale) initDB() error {
 		return err
 	}
 
+	err = db.AutoMigrate(&SharedMachine{})
+	if err != nil {
+		return err
+	}
+
 	err = h.setValue("db_version", dbVersion)
 	return err
 }
@@ -1,7 +1,7 @@
 # This file contains some of the official Tailscale DERP servers,
 # shamelessly taken from https://github.com/tailscale/tailscale/blob/main/net/dnsfallback/dns-fallback-servers.json
 #
-# If you plan to somehow use headscale, please deploy your own DERP infra
+# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
 regions:
   1:
     regionid: 1
go.mod (11 changed lines)
@@ -5,12 +5,11 @@ go 1.16
 require (
 	github.com/AlecAivazis/survey/v2 v2.0.5
 	github.com/Microsoft/go-winio v0.5.0 // indirect
-	github.com/cenkalti/backoff/v3 v3.0.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.1.1 // indirect
 	github.com/containerd/continuity v0.1.0 // indirect
 	github.com/docker/cli v20.10.8+incompatible // indirect
 	github.com/docker/docker v20.10.8+incompatible // indirect
-	github.com/efekarakus/termcolor v1.0.1 // indirect
+	github.com/efekarakus/termcolor v1.0.1
 	github.com/gin-gonic/gin v1.7.2
 	github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b
 	github.com/klauspost/compress v1.13.1
@@ -18,15 +17,17 @@ require (
 	github.com/mattn/go-sqlite3 v1.14.7 // indirect
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/opencontainers/runc v1.0.1 // indirect
-	github.com/ory/dockertest/v3 v3.7.0 // indirect
-	github.com/rs/zerolog v1.23.0 // indirect
+	github.com/ory/dockertest/v3 v3.7.0
+	github.com/pterm/pterm v0.12.29
+	github.com/rs/zerolog v1.23.0
 	github.com/spf13/cobra v1.1.3
 	github.com/spf13/viper v1.8.1
 	github.com/stretchr/testify v1.7.0 // indirect
 	github.com/tailscale/hujson v0.0.0-20200924210142-dde312d0d6a2
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
 	golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
-	golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 // indirect
+	golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 	gorm.io/datatypes v1.0.1
go.sum (31 changed lines)
@@ -49,6 +49,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/MarvinJWendt/testza v0.1.0 h1:4m+JkB/4e0nUlXdIa10Mg0poUz9CanQKjB3L+xecjAo=
+github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
 github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
@@ -92,6 +94,8 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/atomicgo/cursor v0.0.1 h1:xdogsqa6YYlLfM+GyClC/Lchf7aiMerFiZQn7soTOoU=
+github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.38.52/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
@@ -109,7 +113,6 @@ github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n
 github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A=
 github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
 github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
@@ -150,8 +153,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
 github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4=
@@ -357,6 +360,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/gookit/color v1.3.1/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
+github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk=
+github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/goreleaser/chglog v0.1.2/go.mod h1:tTZsFuSZK4epDXfjMkxzcGbrIOXprf0JFp47BjIr3B8=
@@ -582,6 +587,8 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
 github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
+github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
 github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA=
@@ -731,12 +738,17 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
+github.com/pterm/pterm v0.12.29 h1:wWRNFkC3+fk/agzHIO4aaXtQuRYdXJKngP3ed+LZlMU=
+github.com/pterm/pterm v0.12.29/go.mod h1:WI3qxgvoQFFGKGjGnJR849gU0TsEOvKn5Q8LlY1U7lg=
 github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
 github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw=
 github.com/quasilyte/go-ruleguard v0.2.1/go.mod h1:hN2rVc/uS4bQhQKTio2XaSJSafJwqBUWWwtssT3cQmc=
 github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
 github.com/quasilyte/regex/syntax v0.0.0-20200805063351-8f842688393c/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -817,6 +829,7 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As=
 github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -874,6 +887,8 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
 github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8=
+github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1028,7 +1043,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1140,13 +1154,15 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU=
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1422,6 +1438,7 @@ gorm.io/gorm v1.21.6/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0=
 gorm.io/gorm v1.21.9/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0=
 gorm.io/gorm v1.21.11 h1:CxkXW6Cc+VIBlL8yJEHq+Co4RYXdSLiMKNvgoZPjLK4=
 gorm.io/gorm v1.21.11/go.mod h1:F+OptMscr0P2F2qU97WT1WimdH9GaQPoDW7AYd5i2Y0=
+gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1454,5 +1471,3 @@ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
-tailscale.com v1.10.0 h1:3EWYxpXkCmXsMh1WgqoEjQ/xalxzxU+YD5ZmtaHS5cY=
-tailscale.com v1.10.0/go.mod h1:kgFF5AZPTltwdXjX2/ci4ghlcO3qKNWVIjD9s39pr8c=
+tailscale.com v1.10.2 h1:0EbwydLGDxw7//yB5/1GTKz3hDJvGTUCajPZZPMDDGQ=
+tailscale.com v1.10.2/go.mod h1:kgFF5AZPTltwdXjX2/ci4ghlcO3qKNWVIjD9s39pr8c=
@@ -4,32 +4,67 @@ package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"inet.af/netaddr"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
var _ = check.Suite(&IntegrationSuite{})
|
||||
|
||||
type IntegrationSuite struct{}
|
||||
|
||||
var integrationTmpDir string
|
||||
var ih Headscale
|
||||
|
||||
var pool dockertest.Pool
|
||||
var network dockertest.Network
|
||||
var headscale dockertest.Resource
|
||||
var tailscaleCount int = 10
|
||||
var tailscaleCount int = 25
|
||||
var tailscales map[string]dockertest.Resource
|
||||
|
||||
type IntegrationTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
}
|
||||
|
||||
func TestIntegrationTestSuite(t *testing.T) {
|
||||
s := new(IntegrationTestSuite)
|
||||
suite.Run(t, s)
|
||||
|
||||
	// HandleStats, which allows us to check if we passed and save logs,
	// is called after TearDown, so we cannot tear down containers before
	// we have potentially saved the logs.
	for _, tailscale := range tailscales {
		if err := pool.Purge(&tailscale); err != nil {
			log.Printf("Could not purge resource: %s\n", err)
		}
	}

	if !s.stats.Passed() {
		err := saveLog(&headscale, "test_output")
		if err != nil {
			log.Printf("Could not save log: %s\n", err)
		}
	}
	if err := pool.Purge(&headscale); err != nil {
		log.Printf("Could not purge resource: %s\n", err)
	}

	if err := network.Close(); err != nil {
		log.Printf("Could not close network: %s\n", err)
	}
}
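
The ordering the comment above relies on is a property of testify's suite runner: SetupSuite runs first, TearDownSuite runs after the last test, and HandleStats runs last of all with the aggregate results, which is why log saving and container purging happen only after suite.Run returns. A minimal sketch (a hypothetical standalone file, not part of this diff) that makes the order visible:

// ordersketch demonstrates the testify suite hook order.
package ordersketch

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/suite"
)

type OrderSuite struct {
	suite.Suite
}

func (s *OrderSuite) SetupSuite()    { fmt.Println("1: SetupSuite") }
func (s *OrderSuite) TestExample()   { fmt.Println("2: test body") }
func (s *OrderSuite) TearDownSuite() { fmt.Println("3: TearDownSuite") }

// HandleStats is invoked by suite.Run after TearDownSuite, with aggregate
// results for the whole suite (e.g. stats.Passed()).
func (s *OrderSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	fmt.Printf("4: HandleStats, passed=%v\n", stats.Passed())
}

func TestOrderSuite(t *testing.T) {
	suite.Run(t, new(OrderSuite))
}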

func executeCommand(resource *dockertest.Resource, cmd []string) (string, error) {
	var stdout bytes.Buffer
	var stderr bytes.Buffer
@@ -55,6 +90,48 @@ func executeCommand(resource *dockertest.Resource, cmd []string) (string, error)
	return stdout.String(), nil
}

func saveLog(resource *dockertest.Resource, basePath string) error {
	err := os.MkdirAll(basePath, os.ModePerm)
	if err != nil {
		return err
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer

	err = pool.Client.Logs(
		docker.LogsOptions{
			Context:      context.TODO(),
			Container:    resource.Container.ID,
			OutputStream: &stdout,
			ErrorStream:  &stderr,
			Tail:         "all",
			RawTerminal:  false,
			Stdout:       true,
			Stderr:       true,
			Follow:       false,
			Timestamps:   false,
		},
	)
	if err != nil {
		return err
	}

	fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)

	err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0644)
	if err != nil {
		return err
	}

err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stdout.String()), 0644)
|
||||
	if err != nil {
		return err
	}

	return nil
}

func dockerRestartPolicy(config *docker.HostConfig) {
	// set AutoRemove to true so that stopped container goes away by itself
	config.AutoRemove = true
@@ -63,7 +140,7 @@ func dockerRestartPolicy(config *docker.HostConfig) {
	}
}

func (s *IntegrationSuite) SetUpSuite(c *check.C) {
func (s *IntegrationTestSuite) SetupSuite() {
	var err error
	h = Headscale{
		dbType: "sqlite3",
@@ -104,12 +181,10 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
			fmt.Sprintf("%s/derp.yaml:/etc/headscale/derp.yaml", currentPath),
		},
		Networks: []*dockertest.Network{&network},
		// Cmd: []string{"sleep", "3600"},
		Cmd: []string{"headscale", "serve"},
		Cmd: []string{"headscale", "serve"},
		PortBindings: map[docker.Port][]docker.PortBinding{
			"8080/tcp": []docker.PortBinding{{HostPort: "8080"}},
		},
		Env: []string{},
	}

	fmt.Println("Creating headscale container")
@@ -127,10 +202,7 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
	tailscaleOptions := &dockertest.RunOptions{
		Name:     hostname,
		Networks: []*dockertest.Network{&network},
		// Make the container run until killed
		// Cmd: []string{"sleep", "3600"},
		Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
		Env: []string{},
		Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
	}

	if pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy); err == nil {
@@ -164,14 +236,14 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
		&headscale,
		[]string{"headscale", "namespaces", "create", "test"},
	)
	c.Assert(err, check.IsNil)
	assert.Nil(s.T(), err)

	fmt.Println("Creating pre auth key")
	authKey, err := executeCommand(
		&headscale,
		[]string{"headscale", "-n", "test", "preauthkeys", "create", "--reusable", "--expiration", "24h"},
	)
	c.Assert(err, check.IsNil)
	assert.Nil(s.T(), err)

	headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))

@@ -186,42 +258,126 @@ func (s *IntegrationSuite) SetUpSuite(c *check.C) {
			command,
		)
		fmt.Println("tailscale result: ", result)
		c.Assert(err, check.IsNil)
		assert.Nil(s.T(), err)
		fmt.Printf("%s joined\n", hostname)
	}

	// The nodes need a bit of time to get their updated maps from headscale
	// TODO: See if we can have a more deterministic wait here.
	time.Sleep(60 * time.Second)
}
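
The actual `command` used to join each node is elided by the hunk above. A plausible shape, assuming the standard tailscale CLI flags and the `headscaleEndpoint` and `authKey` values built earlier — an illustrative helper, not part of this diff, and the real test may differ:

// joinCommand builds a hypothetical `tailscale up` invocation for enrolling a
// tailscaled container against the headscale endpoint above.
func joinCommand(loginServer, authKey string) []string {
	return []string{
		"tailscale", "up",
		"-login-server", loginServer, // e.g. http://headscale:8080
		"--authkey", strings.TrimSuffix(authKey, "\n"),
	}
}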

func (s *IntegrationSuite) TearDownSuite(c *check.C) {
	if err := pool.Purge(&headscale); err != nil {
		log.Printf("Could not purge resource: %s\n", err)
	}

	for _, tailscale := range tailscales {
		if err := pool.Purge(&tailscale); err != nil {
			log.Printf("Could not purge resource: %s\n", err)
		}
	}

	if err := network.Close(); err != nil {
		log.Printf("Could not close network: %s\n", err)
	}
func (s *IntegrationTestSuite) TearDownSuite() {
}

func (s *IntegrationSuite) TestListNodes(c *check.C) {
func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	s.stats = stats
}

func (s *IntegrationTestSuite) TestListNodes() {
	fmt.Println("Listing nodes")
	result, err := executeCommand(
		&headscale,
		[]string{"headscale", "-n", "test", "nodes", "list"},
	)
	c.Assert(err, check.IsNil)
	assert.Nil(s.T(), err)

	fmt.Printf("List nodes: \n%s\n", result)

	// Check that the correct count of hosts is present in the node list
	lines := strings.Split(result, "\n")
	assert.Equal(s.T(), len(tailscales), len(lines)-2)

	for hostname := range tailscales {
		c.Assert(strings.Contains(result, hostname), check.Equals, true)
		assert.Contains(s.T(), result, hostname)
	}
}

func (s *IntegrationSuite) TestGetIpAddresses(c *check.C) {
func (s *IntegrationTestSuite) TestGetIpAddresses() {
	ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
	ips, err := getIPs()
	assert.Nil(s.T(), err)

	for hostname := range tailscales {
		s.T().Run(hostname, func(t *testing.T) {
			ip := ips[hostname]

			fmt.Printf("IP for %s: %s\n", hostname, ip)

			// c.Assert(ip.Valid(), check.IsTrue)
			assert.True(t, ip.Is4())
			assert.True(t, ipPrefix.Contains(ip))

			ips[hostname] = ip
		})
	}
}

func (s *IntegrationTestSuite) TestStatus() {
	ips, err := getIPs()
	assert.Nil(s.T(), err)

	for hostname, tailscale := range tailscales {
		s.T().Run(hostname, func(t *testing.T) {
			command := []string{"tailscale", "status"}

			fmt.Printf("Getting status for %s\n", hostname)
			result, err := executeCommand(
				&tailscale,
				command,
			)
			assert.Nil(t, err)
			// fmt.Printf("Status for %s: %s", hostname, result)

			// Check if we have as many nodes in status
			// as we have IPs/tailscales
			lines := strings.Split(result, "\n")
			assert.Equal(t, len(ips), len(lines)-1)
			assert.Equal(t, len(tailscales), len(lines)-1)

			// Check that all hosts are present in every host's status output
			for ipHostname, ip := range ips {
				assert.Contains(t, result, ip.String())
				assert.Contains(t, result, ipHostname)
			}
		})
	}
}

func (s *IntegrationTestSuite) TestPingAllPeers() {
	ips, err := getIPs()
	assert.Nil(s.T(), err)

	for hostname, tailscale := range tailscales {
		for peername, ip := range ips {
			s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
				// We currently can't ping ourselves, so skip that.
				if peername != hostname {
					// We are only interested in a "direct ping", which means we
					// might need a couple more attempts before reaching the node.
					command := []string{
						"tailscale", "ping",
						"--timeout=1s",
						"--c=20",
						"--until-direct=true",
						ip.String(),
					}

					fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
					result, err := executeCommand(
						&tailscale,
						command,
					)
					assert.Nil(t, err)
					fmt.Printf("Result for %s: %s\n", hostname, result)
					assert.Contains(t, result, "pong")
				}
			})
		}
	}
}

func getIPs() (map[string]netaddr.IP, error) {
	ips := make(map[string]netaddr.IP)
	for hostname, tailscale := range tailscales {
		command := []string{"tailscale", "ip"}
@@ -230,17 +386,16 @@ func (s *IntegrationSuite) TestGetIpAddresses(c *check.C) {
			&tailscale,
			command,
		)
		c.Assert(err, check.IsNil)
		if err != nil {
			return nil, err
		}

		ip, err := netaddr.ParseIP(strings.TrimSuffix(result, "\n"))
		c.Assert(err, check.IsNil)

		fmt.Printf("IP for %s: %s", hostname, result)

		// c.Assert(ip.Valid(), check.IsTrue)
		c.Assert(ip.Is4(), check.Equals, true)
		c.Assert(ipPrefix.Contains(ip), check.Equals, true)
		if err != nil {
			return nil, err
		}

		ips[hostname] = ip
	}
	return ips, nil
}

@@ -7,5 +7,5 @@
  "db_type": "sqlite3",
  "db_path": "/tmp/integration_test_db.sqlite3",
  "acl_policy_path": "",
  "log_level": "trace"
  "log_level": "debug"
}

208
machine.go
@@ -2,6 +2,7 @@ package headscale

import (
	"encoding/json"
	"errors"
	"fmt"
	"sort"
	"strconv"
@@ -31,8 +32,9 @@ type Machine struct {
	AuthKeyID uint
	AuthKey   *PreAuthKey

	LastSeen *time.Time
	Expiry   *time.Time
	LastSeen             *time.Time
	LastSuccessfulUpdate *time.Time
	Expiry               *time.Time

	HostInfo  datatypes.JSON
	Endpoints datatypes.JSON
@@ -48,7 +50,9 @@ func (m Machine) isAlreadyRegistered() bool {
	return m.Registered
}

func (m Machine) toNode() (*tailcfg.Node, error) {
// toNode converts a Machine into a Tailscale Node. includeRoutes is false for shared nodes
// as per the expected behaviour in the official SaaS
func (m Machine) toNode(includeRoutes bool) (*tailcfg.Node, error) {
	nKey, err := wgkey.ParseHex(m.NodeKey)
	if err != nil {
		return nil, err
@@ -83,24 +87,26 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
	allowedIPs := []netaddr.IPPrefix{}
	allowedIPs = append(allowedIPs, ip) // we append the node's own IP, as it is required by the clients

	routesStr := []string{}
	if len(m.EnabledRoutes) != 0 {
		allwIps, err := m.EnabledRoutes.MarshalJSON()
		if err != nil {
			return nil, err
	if includeRoutes {
		routesStr := []string{}
		if len(m.EnabledRoutes) != 0 {
			allwIps, err := m.EnabledRoutes.MarshalJSON()
			if err != nil {
				return nil, err
			}
			err = json.Unmarshal(allwIps, &routesStr)
			if err != nil {
				return nil, err
			}
		}
		err = json.Unmarshal(allwIps, &routesStr)
		if err != nil {
			return nil, err
		}
	}

	for _, aip := range routesStr {
		ip, err := netaddr.ParseIPPrefix(aip)
		if err != nil {
			return nil, err
		for _, routeStr := range routesStr {
			ip, err := netaddr.ParseIPPrefix(routeStr)
			if err != nil {
				return nil, err
			}
			allowedIPs = append(allowedIPs, ip)
		}
		allowedIPs = append(allowedIPs, ip)
	}

	endpoints := []string{}
@@ -134,13 +140,20 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
		derp = "127.3.3.40:0" // Zero means disconnected or unknown.
	}

	var keyExpiry time.Time
	if m.Expiry != nil {
		keyExpiry = *m.Expiry
	} else {
		keyExpiry = time.Time{}
	}

	n := tailcfg.Node{
		ID:        tailcfg.NodeID(m.ID), // this is the actual ID
		StableID:  tailcfg.StableNodeID(strconv.FormatUint(m.ID, 10)), // in headscale, unlike tailcontrol server, IDs are permanent
		Name:      hostinfo.Hostname,
		User:      tailcfg.UserID(m.NamespaceID),
		Key:       tailcfg.NodeKey(nKey),
		KeyExpiry: *m.Expiry,
		KeyExpiry: keyExpiry,
		Machine:   tailcfg.MachineKey(mKey),
		DiscoKey:  discoKey,
		Addresses: addrs,
@@ -159,6 +172,11 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
}

func (h *Headscale) getPeers(m Machine) (*[]*tailcfg.Node, error) {
	log.Trace().
		Str("func", "getPeers").
		Str("machine", m.Name).
		Msg("Finding peers")

	machines := []Machine{}
	if err := h.db.Where("namespace_id = ? AND machine_key <> ? AND registered",
		m.NamespaceID, m.MachineKey).Find(&machines).Error; err != nil {
@@ -166,15 +184,34 @@ func (h *Headscale) getPeers(m Machine) (*[]*tailcfg.Node, error) {
		return nil, err
	}

	// Here we fetch the machines that are shared to the `Namespace` of the machine we are getting peers for
	sharedMachines := []SharedMachine{}
	if err := h.db.Preload("Namespace").Preload("Machine").Where("namespace_id = ?",
		m.NamespaceID).Find(&sharedMachines).Error; err != nil {
		return nil, err
	}

	peers := []*tailcfg.Node{}
	for _, mn := range machines {
		peer, err := mn.toNode()
		peer, err := mn.toNode(true)
		if err != nil {
			return nil, err
		}
		peers = append(peers, peer)
	}
	for _, sharedMachine := range sharedMachines {
		peer, err := sharedMachine.Machine.toNode(false) // shared nodes do not expose their routes
		if err != nil {
			return nil, err
		}
		peers = append(peers, peer)
	}
	sort.Slice(peers, func(i, j int) bool { return peers[i].ID < peers[j].ID })

	log.Trace().
		Str("func", "getPeers").
		Str("machine", m.Name).
		Msgf("Found peers: %s", tailNodesToString(peers))
	return &peers, nil
}

@@ -190,18 +227,27 @@ func (h *Headscale) GetMachine(namespace string, name string) (*Machine, error)
			return &m, nil
		}
	}
	return nil, fmt.Errorf("not found")
	return nil, fmt.Errorf("machine not found")
}

// GetMachineByID finds a Machine by ID and returns the Machine struct
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
	m := Machine{}
	if result := h.db.Find(&Machine{ID: id}).First(&m); result.Error != nil {
	if result := h.db.Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
		return nil, result.Error
	}
	return &m, nil
}

// UpdateMachine takes a Machine struct pointer (typically already loaded from the database)
// and updates it with the latest data from the database.
func (h *Headscale) UpdateMachine(m *Machine) error {
	if result := h.db.Find(m).First(&m); result.Error != nil {
		return result.Error
	}
	return nil
}

// DeleteMachine soft-deletes a Machine from the database
func (h *Headscale) DeleteMachine(m *Machine) error {
	m.Registered = false
@@ -238,3 +284,121 @@ func (m *Machine) GetHostInfo() (*tailcfg.Hostinfo, error) {
	}
	return &hostinfo, nil
}

func (h *Headscale) notifyChangesToPeers(m *Machine) {
	peers, err := h.getPeers(*m)
	if err != nil {
		log.Error().
			Str("func", "notifyChangesToPeers").
			Str("machine", m.Name).
			Msgf("Error getting peers: %s", err)
		return
	}
	for _, p := range *peers {
		log.Info().
			Str("func", "notifyChangesToPeers").
			Str("machine", m.Name).
			Str("peer", p.Name).
			Str("address", p.Addresses[0].String()).
			Msgf("Notifying peer %s (%s)", p.Name, p.Addresses[0])
		err := h.sendRequestOnUpdateChannel(p)
		if err != nil {
			log.Info().
				Str("func", "notifyChangesToPeers").
				Str("machine", m.Name).
				Str("peer", p.Name).
				Msgf("Peer %s does not appear to be polling", p.Name)
		}
		log.Trace().
			Str("func", "notifyChangesToPeers").
			Str("machine", m.Name).
			Str("peer", p.Name).
			Str("address", p.Addresses[0].String()).
			Msgf("Notified peer %s (%s)", p.Name, p.Addresses[0])
	}
}

func (h *Headscale) getOrOpenUpdateChannel(m *Machine) <-chan struct{} {
	var updateChan chan struct{}
	if storedChan, ok := h.clientsUpdateChannels.Load(m.ID); ok {
		if unwrapped, ok := storedChan.(chan struct{}); ok {
			updateChan = unwrapped
		} else {
			log.Error().
				Str("handler", "openUpdateChannel").
				Str("machine", m.Name).
				Msg("Failed to convert update channel to struct{}")
		}
	} else {
		log.Debug().
			Str("handler", "openUpdateChannel").
			Str("machine", m.Name).
			Msg("Update channel not found, creating")

		updateChan = make(chan struct{})
		h.clientsUpdateChannels.Store(m.ID, updateChan)
	}
	return updateChan
}

func (h *Headscale) closeUpdateChannel(m *Machine) {
	h.clientsUpdateChannelMutex.Lock()
	defer h.clientsUpdateChannelMutex.Unlock()

	if storedChan, ok := h.clientsUpdateChannels.Load(m.ID); ok {
		if unwrapped, ok := storedChan.(chan struct{}); ok {
			close(unwrapped)
		}
	}
	h.clientsUpdateChannels.Delete(m.ID)
}

func (h *Headscale) sendRequestOnUpdateChannel(m *tailcfg.Node) error {
	h.clientsUpdateChannelMutex.Lock()
	defer h.clientsUpdateChannelMutex.Unlock()

	pUp, ok := h.clientsUpdateChannels.Load(uint64(m.ID))
	if ok {
		log.Info().
			Str("func", "requestUpdate").
			Str("machine", m.Name).
			Msgf("Notifying peer %s", m.Name)

		if update, ok := pUp.(chan struct{}); ok {
			log.Trace().
				Str("func", "requestUpdate").
				Str("machine", m.Name).
				Msgf("Update channel is %#v", update)

			update <- struct{}{}

			log.Trace().
				Str("func", "requestUpdate").
				Str("machine", m.Name).
				Msgf("Notified machine %s", m.Name)
		}
	} else {
		log.Info().
			Str("func", "requestUpdate").
			Str("machine", m.Name).
			Msgf("Machine %s does not appear to be polling", m.Name)
		return errors.New("machine does not seem to be polling")
	}
	return nil
}

func (h *Headscale) isOutdated(m *Machine) bool {
	err := h.UpdateMachine(m)
	if err != nil {
		return true
	}

	lastChange := h.getLastStateChange(m.Namespace.Name)
	log.Trace().
		Str("func", "keepAlive").
		Str("machine", m.Name).
		Time("last_successful_update", *m.LastSuccessfulUpdate).
		Time("last_state_change", lastChange).
		Msgf("Checking if %s is missing updates", m.Name)
	return m.LastSuccessfulUpdate.Before(lastChange)
}
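
Condensed into a self-contained sketch, the update-channel machinery above boils down to a mutex-guarded sync.Map of per-machine channels; the names below are illustrative, not headscale's API. Note that, as in the code above, the send in notify blocks until the polling goroutine receives, so a stalled consumer stalls the notifier.

package main

import (
	"fmt"
	"sync"
)

// registry maps machine IDs to their update channels, mirroring
// clientsUpdateChannels/clientsUpdateChannelMutex above in miniature.
type registry struct {
	mu    sync.Mutex
	chans sync.Map // uint64 -> chan struct{}
}

// open returns the existing channel for id, creating one if needed.
func (r *registry) open(id uint64) chan struct{} {
	r.mu.Lock()
	defer r.mu.Unlock()
	if c, ok := r.chans.Load(id); ok {
		return c.(chan struct{})
	}
	c := make(chan struct{})
	r.chans.Store(id, c)
	return c
}

// notify performs a blocking send to the machine's channel, returning false
// when the machine has no channel (i.e. it is not polling).
func (r *registry) notify(id uint64) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	c, ok := r.chans.Load(id)
	if !ok {
		return false
	}
	c.(chan struct{}) <- struct{}{}
	return true
}

func main() {
	r := &registry{}
	ch := r.open(1)
	done := make(chan struct{})
	go func() {
		<-ch // the long-poll goroutine waits here for update requests
		fmt.Println("machine 1 got an update request")
		close(done)
	}()
	fmt.Println("notified:", r.notify(1))
	<-done
}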

@@ -91,12 +91,34 @@ func (h *Headscale) ListMachinesInNamespace(name string) (*[]Machine, error) {
	}

	machines := []Machine{}
	if err := h.db.Preload("AuthKey").Where(&Machine{NamespaceID: n.ID}).Find(&machines).Error; err != nil {
	if err := h.db.Preload("AuthKey").Preload("Namespace").Where(&Machine{NamespaceID: n.ID}).Find(&machines).Error; err != nil {
		return nil, err
	}
	return &machines, nil
}

// ListSharedMachinesInNamespace returns all the machines that are shared to the specified namespace
func (h *Headscale) ListSharedMachinesInNamespace(name string) (*[]Machine, error) {
	namespace, err := h.GetNamespace(name)
	if err != nil {
		return nil, err
	}
	sharedMachines := []SharedMachine{}
	if err := h.db.Preload("Namespace").Where(&SharedMachine{NamespaceID: namespace.ID}).Find(&sharedMachines).Error; err != nil {
		return nil, err
	}

	machines := []Machine{}
	for _, sharedMachine := range sharedMachines {
		machine, err := h.GetMachineByID(sharedMachine.MachineID) // otherwise not everything comes filled
		if err != nil {
			return nil, err
		}
		machines = append(machines, *machine)
	}
	return &machines, nil
}
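
The GetMachineByID round-trip above ("otherwise not everything comes filled") exists because GORM only populates association fields that are explicitly preloaded. An illustrative helper, assuming the Machine model from machine.go; not part of this diff:

// loadMachineWithRelations shows the preload requirement in isolation.
func (h *Headscale) loadMachineWithRelations(id uint64) (*Machine, error) {
	m := Machine{}
	// Without the Preload calls, m.Namespace and m.AuthKey would remain
	// zero-valued structs even though the foreign key columns are set.
	if err := h.db.Preload("Namespace").Preload("AuthKey").First(&m, id).Error; err != nil {
		return nil, err
	}
	return &m, nil
}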

// SetMachineNamespace assigns a Machine to a namespace
func (h *Headscale) SetMachineNamespace(m *Machine, namespaceName string) error {
	n, err := h.GetNamespace(namespaceName)
@@ -169,25 +191,7 @@ func (h *Headscale) checkForNamespacesPendingUpdates() {
			continue
		}
		for _, m := range *machines {
			peers, _ := h.getPeers(m)
			for _, p := range *peers {
				pUp, ok := h.clientsPolling.Load(uint64(p.ID))
				if ok {
					log.Info().
						Str("func", "checkForNamespacesPendingUpdates").
						Str("machine", m.Name).
						Str("peer", m.Name).
						Str("address", p.Addresses[0].String()).
						Msgf("Notifying peer %s (%s)", p.Name, p.Addresses[0])
					pUp.(chan []byte) <- []byte{}
				} else {
					log.Info().
						Str("func", "checkForNamespacesPendingUpdates").
						Str("machine", m.Name).
						Str("peer", m.Name).
						Msgf("Peer %s does not appear to be polling", p.Name)
				}
			}
			h.notifyChangesToPeers(&m)
		}
	}
	newV, err := h.getValue("namespaces_pending_updates")

454
poll.go
Normal file
@@ -0,0 +1,454 @@
package headscale

import (
	"encoding/json"
	"errors"
	"io"
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/rs/zerolog/log"
	"gorm.io/datatypes"
	"gorm.io/gorm"
	"tailscale.com/tailcfg"
	"tailscale.com/types/wgkey"
)

// PollNetMapHandler takes care of /machine/:id/map
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
func (h *Headscale) PollNetMapHandler(c *gin.Context) {
	log.Trace().
		Str("handler", "PollNetMap").
		Str("id", c.Param("id")).
		Msg("PollNetMapHandler called")
	body, _ := io.ReadAll(c.Request.Body)
	mKeyStr := c.Param("id")
	mKey, err := wgkey.ParseHex(mKeyStr)
	if err != nil {
		log.Error().
			Str("handler", "PollNetMap").
			Err(err).
			Msg("Cannot parse client key")
		c.String(http.StatusBadRequest, "")
		return
	}
	req := tailcfg.MapRequest{}
	err = decode(body, &req, &mKey, h.privateKey)
	if err != nil {
		log.Error().
			Str("handler", "PollNetMap").
			Err(err).
			Msg("Cannot decode message")
		c.String(http.StatusBadRequest, "")
		return
	}

	var m Machine
	if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
		log.Warn().
			Str("handler", "PollNetMap").
			Msgf("Ignoring request, cannot find machine with key %s", mKey.HexString())
		c.String(http.StatusUnauthorized, "")
		return
	}
	log.Trace().
		Str("handler", "PollNetMap").
		Str("id", c.Param("id")).
		Str("machine", m.Name).
		Msg("Found machine in database")

	hostinfo, _ := json.Marshal(req.Hostinfo)
	m.Name = req.Hostinfo.Hostname
	m.HostInfo = datatypes.JSON(hostinfo)
	m.DiscoKey = wgkey.Key(req.DiscoKey).HexString()
	now := time.Now().UTC()

	// From Tailscale client:
	//
	// ReadOnly is whether the client just wants to fetch the MapResponse,
	// without updating their Endpoints. The Endpoints field will be ignored and
	// LastSeen will not be updated and peers will not be notified of changes.
	//
	// The intended use is for clients to discover the DERP map at start-up
	// before their first real endpoint update.
	if !req.ReadOnly {
		endpoints, _ := json.Marshal(req.Endpoints)
		m.Endpoints = datatypes.JSON(endpoints)
		m.LastSeen = &now
	}
	h.db.Save(&m)

	data, err := h.getMapResponse(mKey, req, m)
	if err != nil {
		log.Error().
			Str("handler", "PollNetMap").
			Str("id", c.Param("id")).
			Str("machine", m.Name).
			Err(err).
			Msg("Failed to get Map response")
		c.String(http.StatusInternalServerError, ":(")
		return
	}

	// We update our peers if the client is not sending ReadOnly in the MapRequest
	// so we don't distribute its initial request (it comes with
	// empty endpoints to peers)

	// Details on the protocol can be found in https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L696
	log.Debug().
		Str("handler", "PollNetMap").
		Str("id", c.Param("id")).
		Str("machine", m.Name).
		Bool("readOnly", req.ReadOnly).
		Bool("omitPeers", req.OmitPeers).
		Bool("stream", req.Stream).
		Msg("Client map request processed")

	if req.ReadOnly {
		log.Info().
			Str("handler", "PollNetMap").
			Str("machine", m.Name).
			Msg("Client is starting up. Probably interested in a DERP map")
		c.Data(200, "application/json; charset=utf-8", *data)
		return
	}

	// There has been an update to _any_ of the nodes that the other nodes would
	// need to know about
	h.setLastStateChangeToNow(m.Namespace.Name)

	// The request is not ReadOnly, so we need to set up channels for updating
	// peers via longpoll

	// Only create update channel if it has not been created
	log.Trace().
		Str("handler", "PollNetMap").
		Str("id", c.Param("id")).
		Str("machine", m.Name).
		Msg("Loading or creating update channel")
	updateChan := h.getOrOpenUpdateChannel(&m)

	pollDataChan := make(chan []byte)
	// defer close(pollData)

	keepAliveChan := make(chan []byte)

	cancelKeepAlive := make(chan struct{})
	defer close(cancelKeepAlive)

	if req.OmitPeers && !req.Stream {
		log.Info().
			Str("handler", "PollNetMap").
			Str("machine", m.Name).
			Msg("Client sent endpoint update and is ok with a response without peer list")
		c.Data(200, "application/json; charset=utf-8", *data)

		// It sounds like we should update the nodes when we have received an endpoint update,
		// even though the comments in the tailscale code don't explicitly say so.
		go h.notifyChangesToPeers(&m)
		return
	} else if req.OmitPeers && req.Stream {
		log.Warn().
			Str("handler", "PollNetMap").
			Str("machine", m.Name).
			Msg("Ignoring request, don't know how to handle it")
		c.String(http.StatusBadRequest, "")
		return
	}

	log.Info().
		Str("handler", "PollNetMap").
		Str("machine", m.Name).
		Msg("Client is ready to access the tailnet")
	log.Info().
		Str("handler", "PollNetMap").
		Str("machine", m.Name).
		Msg("Sending initial map")
	go func() { pollDataChan <- *data }()

	log.Info().
		Str("handler", "PollNetMap").
		Str("machine", m.Name).
		Msg("Notifying peers")
	go h.notifyChangesToPeers(&m)

	h.PollNetMapStream(c, m, req, mKey, pollDataChan, keepAliveChan, updateChan, cancelKeepAlive)
	log.Trace().
		Str("handler", "PollNetMap").
		Str("id", c.Param("id")).
		Str("machine", m.Name).
		Msg("Finished stream, closing PollNetMap session")
}

// PollNetMapStream takes care of /machine/:id/map
// stream logic, ensuring we communicate updates and data
// to the connected clients.
func (h *Headscale) PollNetMapStream(
	c *gin.Context,
	m Machine,
	req tailcfg.MapRequest,
	mKey wgkey.Key,
	pollDataChan chan []byte,
	keepAliveChan chan []byte,
	updateChan <-chan struct{},
	cancelKeepAlive chan struct{},
) {
	go h.scheduledPollWorker(cancelKeepAlive, keepAliveChan, mKey, req, m)

	c.Stream(func(w io.Writer) bool {
		log.Trace().
			Str("handler", "PollNetMapStream").
			Str("machine", m.Name).
			Msg("Waiting for data to stream...")

		log.Trace().
			Str("handler", "PollNetMapStream").
			Str("machine", m.Name).
			Msgf("pollData is %#v, keepAliveChan is %#v, updateChan is %#v", pollDataChan, keepAliveChan, updateChan)

		select {
		case data := <-pollDataChan:
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "pollData").
				Int("bytes", len(data)).
				Msg("Sending data received via pollData channel")
			_, err := w.Write(data)
			if err != nil {
				log.Error().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Str("channel", "pollData").
					Err(err).
					Msg("Cannot write data")
			}
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "pollData").
				Int("bytes", len(data)).
				Msg("Data from pollData channel written successfully")
			// TODO: Abstract away all the database calls, this can cause race conditions
			// when an outdated machine object is kept alive, e.g. the db is updated from the
			// command line, but then overwritten.
			err = h.UpdateMachine(&m)
			if err != nil {
				log.Error().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Str("channel", "pollData").
					Err(err).
					Msg("Cannot update machine from database")
			}
			now := time.Now().UTC()
			m.LastSeen = &now
			m.LastSuccessfulUpdate = &now
			h.db.Save(&m)
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "pollData").
				Int("bytes", len(data)).
				Msg("Machine updated successfully after sending pollData")
			return true

		case data := <-keepAliveChan:
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "keepAlive").
				Int("bytes", len(data)).
				Msg("Sending keep alive message")
			_, err := w.Write(data)
			if err != nil {
				log.Error().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Str("channel", "keepAlive").
					Err(err).
					Msg("Cannot write keep alive message")
			}
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "keepAlive").
				Int("bytes", len(data)).
				Msg("Keep alive sent successfully")
			// TODO: Abstract away all the database calls, this can cause race conditions
			// when an outdated machine object is kept alive, e.g. the db is updated from the
			// command line, but then overwritten.
			err = h.UpdateMachine(&m)
			if err != nil {
				log.Error().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Str("channel", "keepAlive").
					Err(err).
					Msg("Cannot update machine from database")
			}
			now := time.Now().UTC()
			m.LastSeen = &now
			h.db.Save(&m)
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "keepAlive").
				Int("bytes", len(data)).
				Msg("Machine updated successfully after sending keep alive")
			return true

		case <-updateChan:
			log.Trace().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Str("channel", "update").
				Msg("Received a request for update")
			if h.isOutdated(&m) {
				log.Debug().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Time("last_successful_update", *m.LastSuccessfulUpdate).
					Time("last_state_change", h.getLastStateChange(m.Namespace.Name)).
Msgf("There has been updates since the last successful update to %s", m.Name)
|
||||
				data, err := h.getMapResponse(mKey, req, m)
				if err != nil {
					log.Error().
						Str("handler", "PollNetMapStream").
						Str("machine", m.Name).
						Str("channel", "update").
						Err(err).
						Msg("Could not get the map update")
				}
				_, err = w.Write(*data)
				if err != nil {
					log.Error().
						Str("handler", "PollNetMapStream").
						Str("machine", m.Name).
						Str("channel", "update").
						Err(err).
						Msg("Could not write the map response")
				}
				log.Trace().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Str("channel", "update").
					Msg("Updated Map has been sent")

				// Keep track of the last successful update,
				// we sometimes end up in a state where the update
				// is not picked up by a client and we use this
				// to determine if we should "force" an update.
				// TODO: Abstract away all the database calls, this can cause race conditions
				// when an outdated machine object is kept alive, e.g. the db is updated from the
				// command line, but then overwritten.
				err = h.UpdateMachine(&m)
				if err != nil {
					log.Error().
						Str("handler", "PollNetMapStream").
						Str("machine", m.Name).
						Str("channel", "update").
						Err(err).
						Msg("Cannot update machine from database")
				}
				now := time.Now().UTC()
				m.LastSuccessfulUpdate = &now
				h.db.Save(&m)
			} else {
				log.Trace().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Time("last_successful_update", *m.LastSuccessfulUpdate).
					Time("last_state_change", h.getLastStateChange(m.Namespace.Name)).
					Msgf("%s is up to date", m.Name)
			}
			return true

		case <-c.Request.Context().Done():
			log.Info().
				Str("handler", "PollNetMapStream").
				Str("machine", m.Name).
				Msg("The client has closed the connection")
			// TODO: Abstract away all the database calls, this can cause race conditions
			// when an outdated machine object is kept alive, e.g. the db is updated from the
			// command line, but then overwritten.
			err := h.UpdateMachine(&m)
			if err != nil {
				log.Error().
					Str("handler", "PollNetMapStream").
					Str("machine", m.Name).
					Str("channel", "Done").
					Err(err).
					Msg("Cannot update machine from database")
			}
			now := time.Now().UTC()
			m.LastSeen = &now
			h.db.Save(&m)

			cancelKeepAlive <- struct{}{}

			h.closeUpdateChannel(&m)

			close(pollDataChan)

			close(keepAliveChan)

			return false
		}
	})
}

func (h *Headscale) scheduledPollWorker(
	cancelChan <-chan struct{},
	keepAliveChan chan<- []byte,
	mKey wgkey.Key,
	req tailcfg.MapRequest,
	m Machine,
) {
	keepAliveTicker := time.NewTicker(60 * time.Second)
	updateCheckerTicker := time.NewTicker(30 * time.Second)

	for {
		select {
		case <-cancelChan:
			return

		case <-keepAliveTicker.C:
			data, err := h.getMapKeepAliveResponse(mKey, req, m)
			if err != nil {
				log.Error().
					Str("func", "keepAlive").
					Err(err).
					Msg("Error generating the keep alive msg")
				return
			}

			log.Debug().
				Str("func", "keepAlive").
				Str("machine", m.Name).
				Msg("Sending keepalive")
			keepAliveChan <- *data

		case <-updateCheckerTicker.C:
			// Send an update request regardless of whether the node is outdated;
			// whether data is actually sent is determined in the updateChan consumer block.
			n, _ := m.toNode(true)
			err := h.sendRequestOnUpdateChannel(n)
			if err != nil {
				log.Error().
					Str("func", "keepAlive").
					Str("machine", m.Name).
					Err(err).
					Msgf("Failed to send update request to %s", m.Name)
			}
		}
	}
}
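
Stripped of logging and database writes, the long poll above reduces to one select per c.Stream iteration: return true to keep the connection open, false to tear it down. A runnable sketch under those assumptions (a hypothetical standalone program, not headscale's actual handler):

package main

import (
	"io"
	"time"

	"github.com/gin-gonic/gin"
)

// longPoll streams whatever arrives on the data or keepAlive channels until
// the client disconnects, mirroring the PollNetMapStream select loop.
func longPoll(data, keepAlive <-chan []byte) gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Stream(func(w io.Writer) bool {
			select {
			case d := <-data: // a full map response is ready
				_, _ = w.Write(d)
				return true
			case k := <-keepAlive: // periodic no-op frame from a ticker
				_, _ = w.Write(k)
				return true
			case <-c.Request.Context().Done(): // client went away
				return false
			}
		})
	}
}

func main() {
	data := make(chan []byte)
	keepAlive := make(chan []byte)
	go func() {
		for range time.Tick(30 * time.Second) {
			keepAlive <- []byte("{}")
		}
	}()
	r := gin.Default()
	r.GET("/poll", longPoll(data, keepAlive))
	r.Run(":8080")
}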

136
routes.go
@@ -2,62 +2,142 @@ package headscale

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"

	"github.com/pterm/pterm"
	"gorm.io/datatypes"
	"inet.af/netaddr"
)

// GetNodeRoutes returns the subnet routes advertised by a node (identified by
// GetAdvertisedNodeRoutes returns the subnet routes advertised by a node (identified by
// namespace and node name)
func (h *Headscale) GetNodeRoutes(namespace string, nodeName string) (*[]netaddr.IPPrefix, error) {
func (h *Headscale) GetAdvertisedNodeRoutes(namespace string, nodeName string) (*[]netaddr.IPPrefix, error) {
	m, err := h.GetMachine(namespace, nodeName)
	if err != nil {
		return nil, err
	}

	hi, err := m.GetHostInfo()
	hostInfo, err := m.GetHostInfo()
	if err != nil {
		return nil, err
	}
	return &hi.RoutableIPs, nil
	return &hostInfo.RoutableIPs, nil
}

// GetEnabledNodeRoutes returns the subnet routes enabled by a node (identified by
// namespace and node name)
func (h *Headscale) GetEnabledNodeRoutes(namespace string, nodeName string) ([]netaddr.IPPrefix, error) {
	m, err := h.GetMachine(namespace, nodeName)
	if err != nil {
		return nil, err
	}

	data, err := m.EnabledRoutes.MarshalJSON()
	if err != nil {
		return nil, err
	}

	routesStr := []string{}
	err = json.Unmarshal(data, &routesStr)
	if err != nil {
		return nil, err
	}

	routes := make([]netaddr.IPPrefix, len(routesStr))
	for index, routeStr := range routesStr {
		route, err := netaddr.ParseIPPrefix(routeStr)
		if err != nil {
			return nil, err
		}
		routes[index] = route
	}

	return routes, nil
}

// IsNodeRouteEnabled checks if a certain route has been enabled
func (h *Headscale) IsNodeRouteEnabled(namespace string, nodeName string, routeStr string) bool {
	route, err := netaddr.ParseIPPrefix(routeStr)
	if err != nil {
		return false
	}

	enabledRoutes, err := h.GetEnabledNodeRoutes(namespace, nodeName)
	if err != nil {
		return false
	}

	for _, enabledRoute := range enabledRoutes {
		if route == enabledRoute {
			return true
		}
	}
	return false
}

// EnableNodeRoute enables a subnet route advertised by a node (identified by
// namespace and node name)
func (h *Headscale) EnableNodeRoute(namespace string, nodeName string, routeStr string) (*netaddr.IPPrefix, error) {
func (h *Headscale) EnableNodeRoute(namespace string, nodeName string, routeStr string) error {
	m, err := h.GetMachine(namespace, nodeName)
	if err != nil {
		return nil, err
	}
	hi, err := m.GetHostInfo()
	if err != nil {
		return nil, err
		return err
	}

	route, err := netaddr.ParseIPPrefix(routeStr)
	if err != nil {
		return nil, err
		return err
	}

	for _, rIP := range hi.RoutableIPs {
		if rIP == route {
			routes, _ := json.Marshal([]string{routeStr}) // TODO: only one for the time being, so overwriting the rest
			m.EnabledRoutes = datatypes.JSON(routes)
			h.db.Save(&m)
	availableRoutes, err := h.GetAdvertisedNodeRoutes(namespace, nodeName)
	if err != nil {
		return err
	}

			// THIS IS COMPLETELY USELESS.
			// The peers map is stored in memory in the server process.
			// Definitely not accessible from the CLI tool.
			// We need RPC to the server - or some kind of 'needsUpdate' field in the DB
			peers, _ := h.getPeers(*m)
			for _, p := range *peers {
				if pUp, ok := h.clientsPolling.Load(uint64(p.ID)); ok {
					pUp.(chan []byte) <- []byte{}
				}
	enabledRoutes, err := h.GetEnabledNodeRoutes(namespace, nodeName)
	if err != nil {
		return err
	}

	available := false
	for _, availableRoute := range *availableRoutes {
		// If the route is available, and not yet enabled, add it to the new routing table
		if route == availableRoute {
			available = true
			if !h.IsNodeRouteEnabled(namespace, nodeName, routeStr) {
				enabledRoutes = append(enabledRoutes, route)
			}
			return &rIP, nil
		}
	}

	return nil, errors.New("could not find routable range")
	if !available {
return fmt.Errorf("route (%s) is not available on node %s", nodeName, routeStr)
|
||||
	}

	routes, err := json.Marshal(enabledRoutes)
	if err != nil {
		return err
	}

	m.EnabledRoutes = datatypes.JSON(routes)
	h.db.Save(&m)

	err = h.RequestMapUpdates(m.NamespaceID)
	if err != nil {
		return err
	}

	return nil
}

// RoutesToPtables converts the list of routes to a nice table
func (h *Headscale) RoutesToPtables(namespace string, nodeName string, availableRoutes []netaddr.IPPrefix) pterm.TableData {
	d := pterm.TableData{{"Route", "Enabled"}}

	for _, route := range availableRoutes {
		enabled := h.IsNodeRouteEnabled(namespace, nodeName, route.String())

		d = append(d, []string{route.String(), strconv.FormatBool(enabled)})
	}
	return d
}
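
A sketch of how a caller plausibly strings these together — render the advertised routes and their enabled state with pterm. It assumes a constructed Headscale value `h`; this is not the actual CLI code from the diff:

// printNodeRoutes renders the route table for one node.
func printNodeRoutes(h *Headscale, namespace, nodeName string) error {
	available, err := h.GetAdvertisedNodeRoutes(namespace, nodeName)
	if err != nil {
		return err
	}
	d := h.RoutesToPtables(namespace, nodeName, *available)
	// pterm renders the TableData produced above as an aligned table.
	return pterm.DefaultTable.WithHasHeader().WithData(d).Render()
}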

@@ -16,7 +16,7 @@ func (s *Suite) TestGetRoutes(c *check.C) {
	pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine("test", "testmachine")
	_, err = h.GetMachine("test", "test_get_route_machine")
	c.Assert(err, check.NotNil)

	route, err := netaddr.ParseIPPrefix("10.0.0.0/24")
@@ -33,7 +33,7 @@ func (s *Suite) TestGetRoutes(c *check.C) {
		MachineKey: "foo",
		NodeKey:    "bar",
		DiscoKey:   "faa",
		Name:       "testmachine",
		Name:       "test_get_route_machine",
		NamespaceID:    n.ID,
		Registered:     true,
		RegisterMethod: "authKey",
@@ -42,14 +42,87 @@ func (s *Suite) TestGetRoutes(c *check.C) {
	}
	h.db.Save(&m)

	r, err := h.GetNodeRoutes("test", "testmachine")
	r, err := h.GetAdvertisedNodeRoutes("test", "test_get_route_machine")
	c.Assert(err, check.IsNil)
	c.Assert(len(*r), check.Equals, 1)

	_, err = h.EnableNodeRoute("test", "testmachine", "192.168.0.0/24")
	err = h.EnableNodeRoute("test", "test_get_route_machine", "192.168.0.0/24")
	c.Assert(err, check.NotNil)

	_, err = h.EnableNodeRoute("test", "testmachine", "10.0.0.0/24")
	err = h.EnableNodeRoute("test", "test_get_route_machine", "10.0.0.0/24")
	c.Assert(err, check.IsNil)
}

func (s *Suite) TestGetEnableRoutes(c *check.C) {
	n, err := h.CreateNamespace("test")
	c.Assert(err, check.IsNil)

	pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine("test", "test_enable_route_machine")
	c.Assert(err, check.NotNil)

	route, err := netaddr.ParseIPPrefix(
		"10.0.0.0/24",
	)
	c.Assert(err, check.IsNil)

	route2, err := netaddr.ParseIPPrefix(
		"150.0.10.0/25",
	)
	c.Assert(err, check.IsNil)

	hi := tailcfg.Hostinfo{
		RoutableIPs: []netaddr.IPPrefix{route, route2},
	}
	hostinfo, err := json.Marshal(hi)
	c.Assert(err, check.IsNil)

	m := Machine{
		ID:             0,
		MachineKey:     "foo",
		NodeKey:        "bar",
		DiscoKey:       "faa",
		Name:           "test_enable_route_machine",
		NamespaceID:    n.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		AuthKeyID:      uint(pak.ID),
		HostInfo:       datatypes.JSON(hostinfo),
	}
	h.db.Save(&m)

	availableRoutes, err := h.GetAdvertisedNodeRoutes("test", "test_enable_route_machine")
	c.Assert(err, check.IsNil)
	c.Assert(len(*availableRoutes), check.Equals, 2)

	enabledRoutes, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes), check.Equals, 0)

	err = h.EnableNodeRoute("test", "test_enable_route_machine", "192.168.0.0/24")
	c.Assert(err, check.NotNil)

	err = h.EnableNodeRoute("test", "test_enable_route_machine", "10.0.0.0/24")
	c.Assert(err, check.IsNil)

	enabledRoutes1, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes1), check.Equals, 1)

	// Adding it twice will just let it pass through
	err = h.EnableNodeRoute("test", "test_enable_route_machine", "10.0.0.0/24")
	c.Assert(err, check.IsNil)

	enabledRoutes2, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes2), check.Equals, 1)

	err = h.EnableNodeRoute("test", "test_enable_route_machine", "150.0.10.0/25")
	c.Assert(err, check.IsNil)

	enabledRoutes3, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
	c.Assert(err, check.IsNil)
	c.Assert(len(enabledRoutes3), check.Equals, 2)
}

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

set -e -o pipefail
commit="$1"

37
sharing.go
Normal file
@@ -0,0 +1,37 @@
package headscale

import "gorm.io/gorm"

const errorSameNamespace = Error("Destination namespace same as origin")
const errorMachineAlreadyShared = Error("Node already shared to this namespace")

// SharedMachine is a join table to support sharing nodes between namespaces
type SharedMachine struct {
	gorm.Model
	MachineID   uint64
	Machine     Machine
	NamespaceID uint
	Namespace   Namespace
}

// AddSharedMachineToNamespace adds a machine as a shared node to a namespace
func (h *Headscale) AddSharedMachineToNamespace(m *Machine, ns *Namespace) error {
	if m.NamespaceID == ns.ID {
		return errorSameNamespace
	}

	sharedMachine := SharedMachine{}
if err := h.db.Where("machine_id = ? AND namespace_id", m.ID, ns.ID).First(&sharedMachine).Error; err == nil {
|
||||
		return errorMachineAlreadyShared
	}

	sharedMachine = SharedMachine{
		MachineID:   m.ID,
		Machine:     *m,
		NamespaceID: ns.ID,
		Namespace:   *ns,
	}
	h.db.Save(&sharedMachine)

	return nil
}
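
The join table also supports the inverse query, which this diff does not need yet. An illustrative (hypothetical) helper showing how SharedMachine is meant to be traversed:

// listNamespacesMachineIsSharedTo returns every namespace a machine has been
// shared into, by walking the SharedMachine join table.
func (h *Headscale) listNamespacesMachineIsSharedTo(m *Machine) ([]Namespace, error) {
	sharedMachines := []SharedMachine{}
	if err := h.db.Preload("Namespace").
		Where(&SharedMachine{MachineID: m.ID}).
		Find(&sharedMachines).Error; err != nil {
		return nil, err
	}
	namespaces := make([]Namespace, 0, len(sharedMachines))
	for _, sm := range sharedMachines {
		namespaces = append(namespaces, sm.Namespace)
	}
	return namespaces, nil
}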
|
359
sharing_test.go
Normal file
359
sharing_test.go
Normal file
@@ -0,0 +1,359 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"gopkg.in/check.v1"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func (s *Suite) TestBasicSharedNodesInNamespace(c *check.C) {
|
||||
n1, err := h.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
n2, err := h.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
m1 := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: n1.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: "100.64.0.1",
|
||||
AuthKeyID: uint(pak1.ID),
|
||||
}
|
||||
h.db.Save(&m1)
|
||||
|
||||
_, err = h.GetMachine(n1.Name, m1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m2 := Machine{
|
||||
ID: 1,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: n2.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: "100.64.0.2",
|
||||
AuthKeyID: uint(pak2.ID),
|
||||
}
|
||||
h.db.Save(&m2)
|
||||
|
||||
_, err = h.GetMachine(n2.Name, m2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
p1s, err := h.getPeers(m1)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(*p1s), check.Equals, 0)
|
||||
|
||||
err = h.AddSharedMachineToNamespace(&m2, n1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
p1sAfter, err := h.getPeers(m1)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(*p1sAfter), check.Equals, 1)
|
||||
c.Assert((*p1sAfter)[0].ID, check.Equals, tailcfg.NodeID(m2.ID))
|
||||
}
|
||||
|
||||
func (s *Suite) TestSameNamespace(c *check.C) {
|
||||
n1, err := h.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
n2, err := h.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
m1 := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: n1.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: "100.64.0.1",
|
||||
AuthKeyID: uint(pak1.ID),
|
||||
}
|
||||
h.db.Save(&m1)
|
||||
|
||||
_, err = h.GetMachine(n1.Name, m1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m2 := Machine{
|
||||
ID: 1,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: n2.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: "100.64.0.2",
|
||||
AuthKeyID: uint(pak2.ID),
|
||||
}
|
||||
h.db.Save(&m2)
|
||||
|
||||
_, err = h.GetMachine(n2.Name, m2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
p1s, err := h.getPeers(m1)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(*p1s), check.Equals, 0)
|
||||
|
||||
err = h.AddSharedMachineToNamespace(&m1, n1)
|
||||
c.Assert(err, check.Equals, errorSameNamespace)
|
||||
}
|
||||
|
||||
func (s *Suite) TestAlreadyShared(c *check.C) {
|
||||
n1, err := h.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
n2, err := h.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
m1 := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: n1.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: "100.64.0.1",
|
||||
AuthKeyID: uint(pak1.ID),
|
||||
}
|
||||
h.db.Save(&m1)
|
||||
|
||||
_, err = h.GetMachine(n1.Name, m1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m2 := Machine{
|
||||
ID: 1,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: n2.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: "100.64.0.2",
|
||||
AuthKeyID: uint(pak2.ID),
|
||||
}
|
||||
h.db.Save(&m2)
|
||||
|
||||
_, err = h.GetMachine(n2.Name, m2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
p1s, err := h.getPeers(m1)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(*p1s), check.Equals, 0)
|
||||
|
||||
err = h.AddSharedMachineToNamespace(&m2, n1)
|
||||
c.Assert(err, check.IsNil)
|
||||
err = h.AddSharedMachineToNamespace(&m2, n1)
|
||||
c.Assert(err, check.Equals, errorMachineAlreadyShared)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDoNotIncludeRoutesOnShared(c *check.C) {
	n1, err := h.CreateNamespace("shared1")
	c.Assert(err, check.IsNil)

	n2, err := h.CreateNamespace("shared2")
	c.Assert(err, check.IsNil)

	pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
	c.Assert(err, check.NotNil)

	m1 := Machine{
		ID:             0,
		MachineKey:     "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
		NodeKey:        "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
		DiscoKey:       "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
		Name:           "test_get_shared_nodes_1",
		NamespaceID:    n1.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      "100.64.0.1",
		AuthKeyID:      uint(pak1.ID),
	}
	h.db.Save(&m1)

	_, err = h.GetMachine(n1.Name, m1.Name)
	c.Assert(err, check.IsNil)

	m2 := Machine{
		ID:             1,
		MachineKey:     "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		NodeKey:        "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		DiscoKey:       "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		Name:           "test_get_shared_nodes_2",
		NamespaceID:    n2.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      "100.64.0.2",
		AuthKeyID:      uint(pak2.ID),
	}
	h.db.Save(&m2)

	_, err = h.GetMachine(n2.Name, m2.Name)
	c.Assert(err, check.IsNil)

	p1s, err := h.getPeers(m1)
	c.Assert(err, check.IsNil)
	c.Assert(len(*p1s), check.Equals, 0)

	err = h.AddSharedMachineToNamespace(&m2, n1)
	c.Assert(err, check.IsNil)
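
	// The shared machine now appears as a peer, but with exactly one
	// AllowedIP: its own address, with no shared routes.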
	p1sAfter, err := h.getPeers(m1)
	c.Assert(err, check.IsNil)
	c.Assert(len(*p1sAfter), check.Equals, 1)
	c.Assert(len((*p1sAfter)[0].AllowedIPs), check.Equals, 1)
}
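
// TestComplexSharingAcrossNamespaces checks peer visibility with machines in
// three namespaces: sharing m2 into n1 adds it to n1's peer lists, while the
// unshared namespace n3 stays isolated.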
func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
	n1, err := h.CreateNamespace("shared1")
	c.Assert(err, check.IsNil)

	n2, err := h.CreateNamespace("shared2")
	c.Assert(err, check.IsNil)

	n3, err := h.CreateNamespace("shared3")
	c.Assert(err, check.IsNil)

	pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	pak3, err := h.CreatePreAuthKey(n3.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	pak4, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
	c.Assert(err, check.IsNil)

	_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
	c.Assert(err, check.NotNil)

	m1 := Machine{
		ID:             0,
		MachineKey:     "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
		NodeKey:        "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
		DiscoKey:       "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
		Name:           "test_get_shared_nodes_1",
		NamespaceID:    n1.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      "100.64.0.1",
		AuthKeyID:      uint(pak1.ID),
	}
	h.db.Save(&m1)

	_, err = h.GetMachine(n1.Name, m1.Name)
	c.Assert(err, check.IsNil)

	m2 := Machine{
		ID:             1,
		MachineKey:     "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		NodeKey:        "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		DiscoKey:       "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		Name:           "test_get_shared_nodes_2",
		NamespaceID:    n2.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      "100.64.0.2",
		AuthKeyID:      uint(pak2.ID),
	}
	h.db.Save(&m2)

	_, err = h.GetMachine(n2.Name, m2.Name)
	c.Assert(err, check.IsNil)

	m3 := Machine{
		ID:             2,
		MachineKey:     "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		NodeKey:        "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		DiscoKey:       "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		Name:           "test_get_shared_nodes_3",
		NamespaceID:    n3.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      "100.64.0.3",
		AuthKeyID:      uint(pak3.ID),
	}
	h.db.Save(&m3)

	_, err = h.GetMachine(n3.Name, m3.Name)
	c.Assert(err, check.IsNil)

	m4 := Machine{
		ID:             3,
		MachineKey:     "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		NodeKey:        "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		DiscoKey:       "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
		Name:           "test_get_shared_nodes_4",
		NamespaceID:    n1.ID,
		Registered:     true,
		RegisterMethod: "authKey",
		IPAddress:      "100.64.0.4",
		AuthKeyID:      uint(pak4.ID),
	}
	h.db.Save(&m4)

	_, err = h.GetMachine(n1.Name, m4.Name)
	c.Assert(err, check.IsNil)

	p1s, err := h.getPeers(m1)
	c.Assert(err, check.IsNil)
	c.Assert(len(*p1s), check.Equals, 1) // nodes 1 and 4

	err = h.AddSharedMachineToNamespace(&m2, n1)
	c.Assert(err, check.IsNil)

	p1sAfter, err := h.getPeers(m1)
	c.Assert(err, check.IsNil)
	c.Assert(len(*p1sAfter), check.Equals, 2) // nodes 1, 2, 4

	pAlone, err := h.getPeers(m3)
	c.Assert(err, check.IsNil)
	c.Assert(len(*pAlone), check.Equals, 0) // node 3 is alone
}

17 utils.go
@@ -10,9 +10,11 @@ import (
	"encoding/json"
	"fmt"
	"io"
	"strings"

	"golang.org/x/crypto/nacl/box"
	"inet.af/netaddr"
	"tailscale.com/tailcfg"
	"tailscale.com/types/wgkey"
)

@@ -58,6 +60,7 @@ func encode(v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, e
	if err != nil {
		return nil, err
	}

	return encodeMsg(b, pubKey, privKey)
}

@@ -139,3 +142,17 @@ func containsIPs(ips []netaddr.IP, ip netaddr.IP) bool {

	return false
}

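// tailNodesToString renders the names of a node list as
// "[ name1, name2, ... ](count)" for log output.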
func tailNodesToString(nodes []*tailcfg.Node) string {
	temp := make([]string, len(nodes))

	for index, node := range nodes {
		temp[index] = node.Name
	}

	return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp))
}

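// tailMapResponseToString summarises a MapResponse (its node and peer list)
// for log output.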
func tailMapResponseToString(resp tailcfg.MapResponse) string {
	return fmt.Sprintf("{ Node: %s, Peers: %s }", resp.Node.Name, tailNodesToString(resp.Peers))
}