Compare commits

...

13 Commits

Author SHA1 Message Date
Kristoffer Dalby
88af29d5f5 derp
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 16:55:35 +01:00
Kristoffer Dalby
9cedc2942b verbose and test
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 16:34:13 +01:00
Kristoffer Dalby
062b9a5611 test
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 15:49:02 +01:00
Kristoffer Dalby
887302e8f1 setup ko image builder for goreleaser
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 15:38:33 +01:00
Kristoffer Dalby
b1b90d165d make dockerfiles testing only note
Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-02-03 15:36:41 +01:00
derelm
4ea12f472a Fix failover to disabled route #1706 (#1707)
* fix #1706 - failover should disregard disabled routes during failover

* fix tests for failover; all current tests assume routes to be enabled

* add testcase for #1706 - failover to disabled route
2024-02-03 15:30:15 +01:00
danielalvsaaker
b4210e2c90 Trim client secret after reading from file (#1697)
Reading from a file will include a trailing line break, which results in a client secret
that does not match one read directly from the config.
2024-01-25 09:53:34 +01:00
dyz
a369d57a17 fix node expire error due to type in gorm model Update (#1692)
Fixes #1674

Signed-off-by: fortitude.zhang <fortitude.zhang@gmail.com>
2024-01-21 17:38:24 +01:00
Kristoffer Dalby
1e22f17f36 node selfupdate and fix subnet router when ACL is enabled (#1673)
Fixes #1604

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-01-18 17:30:25 +01:00
Kristoffer Dalby
65376e2842 ensure re-enabled auto-approve routes work (#1670) 2024-01-18 16:36:47 +01:00
Alexander Halbarth
7e8bf4bfe5 Add Customization Options to DERP Map entry of integrated DERP server (#1565)
Co-authored-by: Alexander Halbarth <alexander.halbarth@alite.at>
Co-authored-by: Bela Lemle <bela.lemle@alite.at>
Co-authored-by: Kristoffer Dalby <kristoffer@dalby.cc>
2024-01-16 16:04:03 +01:00
Kristoffer Dalby
3b103280ef implement selfupdate and pass expiry (#1647) 2024-01-05 10:41:56 +01:00
Kristoffer Dalby
a592ae56b4 fix issue where advertise tags causes hang (#1669)
Fixes #1665

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
2024-01-04 21:26:49 +01:00
34 changed files with 1298 additions and 102 deletions

View File

@@ -20,6 +20,6 @@ jobs:
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Run goreleaser
run: nix develop --command -- goreleaser release --clean
run: nix develop --command -- goreleaser release --clean --verbose
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestEnableDisableAutoApprovedRoute
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestEnableDisableAutoApprovedRoute:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestEnableDisableAutoApprovedRoute
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestEnableDisableAutoApprovedRoute$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

View File

@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagNoACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagNoACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagNoACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagNoACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

View File

@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestNodeAdvertiseTagWithACLCommand
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestNodeAdvertiseTagWithACLCommand:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestNodeAdvertiseTagWithACLCommand
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestNodeAdvertiseTagWithACLCommand$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

View File

@@ -0,0 +1,67 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSubnetRouteACL
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
TestSubnetRouteACL:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- uses: satackey/action-docker-layer-caching@main
continue-on-error: true
- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
- name: Run TestSubnetRouteACL
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.any_changed == 'true'
with:
attempt_limit: 5
command: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSubnetRouteACL$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: pprof
path: "control_logs/*.pprof.tar"

View File

@@ -9,20 +9,20 @@ release:
builds:
- id: headscale
main: ./cmd/headscale/headscale.go
main: ./cmd/headscale
mod_timestamp: "{{ .CommitTimestamp }}"
env:
- CGO_ENABLED=0
targets:
- darwin_amd64
- darwin_arm64
- freebsd_amd64
- linux_386
# - darwin_amd64
# - darwin_arm64
# - freebsd_amd64
# - linux_386
- linux_amd64
- linux_arm64
- linux_arm_5
- linux_arm_6
- linux_arm_7
# - linux_arm64
# - linux_arm_5
# - linux_arm_6
# - linux_arm_7
flags:
- -mod=readonly
ldflags:
@@ -63,7 +63,6 @@ nfpms:
bindir: /usr/bin
formats:
- deb
# - rpm
contents:
- src: ./config-example.yaml
dst: /etc/headscale/config.yaml
@@ -80,6 +79,43 @@ nfpms:
postinstall: ./docs/packaging/postinstall.sh
postremove: ./docs/packaging/postremove.sh
kos:
- id: ghcr
build: headscale
main: ./cmd/headscale
base_image: gcr.io/distroless/base-debian11
repository: ghcr.io/juanfont/headscale
env:
- CGO_ENABLED=0
platforms:
- linux/amd64
# - linux/386
# - linux/arm64
# - linux/arm/v7
# - linux/arm/v6
# - linux/arm/v5
tags:
- latest
- '{{.Tag}}'
- '{{ .Major }}.{{ .Minor }}'
- '{{ .Major }}'
- '{{ if not .Prerelease }}stable{{ end }}'
# - id: dockerhub
# build: headscale
# base_image: gcr.io/distroless/base-debian11
# repository: headscale/headscale
# platforms:
# - linux/amd64
# - linux/386
# - linux/arm64
# - linux/arm/v7
# - linux/arm/v6
# - linux/arm/v5
# tags:
# - latest
# - '{{.Tag}}'
checksum:
name_template: "checksums.txt"
snapshot:

View File

@@ -26,7 +26,7 @@ after improving the test harness as part of adopting [#1460](https://github.com/
- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
- The latest supported client is 1.32
- The latest supported client is 1.36
- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
- If no DERP is configured, the server will fail to start; this can happen if it cannot load the DERPMap from file or URL.
- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)
@@ -43,6 +43,7 @@ Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) tak
Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
Added the possibility to manually create a customizable DERP map entry for the embedded DERP server, instead of creating it automatically. [#1565](https://github.com/juanfont/headscale/pull/1565)
## 0.22.3 (2023-05-12)

View File

@@ -1,4 +1,7 @@
# Builder image
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official or supported release or distribution.
FROM docker.io/golang:1.21-bookworm AS build
ARG VERSION=dev
ENV GOPATH /go

View File

@@ -1,4 +1,7 @@
# Builder image
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official or supported release or distribution.
FROM docker.io/golang:1.21-bookworm AS build
ARG VERSION=dev
ENV GOPATH /go

View File

@@ -1,3 +1,7 @@
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official or supported release or distribution.
FROM golang:latest
RUN apt-get update \

View File

@@ -94,6 +94,16 @@ derp:
#
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag can be used to stop the DERP map entry for the embedded DERP server from being written automatically;
# it enables the creation of your own DERP map entry using a locally available file with the parameter derp.paths.
# If you enable the DERP server and set this to false, you must add the DERP server to the DERP map using derp.paths.
automatically_add_embedded_derp_region: true
# For better connection stability (especially when using an exit node and DNS is not working),
# it is possible to optionally add the public IPv4 and IPv6 address to the DERP map using:
ipv4: 1.2.3.4
ipv6: 2001:db8::1
# List of externally available DERP maps encoded in JSON
urls:
- https://controlplane.tailscale.com/derpmap/default
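
For reference, the two new keys end up on the embedded server's generated DERP region: GenerateRegion (see the hscontrol/derp diff below) pins explicit IPs on the region's DERP node so clients can reach it even when DNS is broken. A minimal sketch of the resulting structure, using the placeholder values from the config above (region ID, code, name, and hostname are made up for the example, not defaults):

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
)

func main() {
	// Mirrors what GenerateRegion produces when derp.server.ipv4 and
	// derp.server.ipv6 are set; IDs and hostname are illustrative.
	region := tailcfg.DERPRegion{
		RegionID:   999,
		RegionCode: "headscale",
		RegionName: "Headscale Embedded DERP",
		Nodes: []*tailcfg.DERPNode{
			{
				Name:     "999a",
				RegionID: 999,
				HostName: "headscale.example.com",
				DERPPort: 443,
				IPv4:     "1.2.3.4",     // derp.server.ipv4
				IPv6:     "2001:db8::1", // derp.server.ipv6
			},
		},
	}

	fmt.Printf("%+v\n", region.Nodes[0])
}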

View File

@@ -95,6 +95,7 @@
gotestsum
gotests
ksh
ko
# 'dot' is needed for pprof graphs
# go tool pprof -http=: <source>

View File

@@ -268,7 +268,7 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
case <-ticker.C:
log.Info().Msg("Fetching DERPMap updates")
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
if h.cfg.DERP.ServerEnabled {
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
region, _ := h.DERPServer.GenerateRegion()
h.DERPMap.Regions[region.RegionID] = &region
}
@@ -501,7 +501,9 @@ func (h *Headscale) Serve() error {
return err
}
h.DERPMap.Regions[region.RegionID] = &region
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
h.DERPMap.Regions[region.RegionID] = &region
}
go h.DERPServer.ServeSTUN()
}

View File

@@ -340,6 +340,16 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
)
}
node.Expiry = &expiry
stateSelfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if stateSelfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
}
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: []*tailcfg.PeerChange{
@@ -350,7 +360,7 @@ func (hsdb *HSDatabase) nodeSetExpiry(node *types.Node, expiry time.Time) error
},
}
if stateUpdate.Valid() {
hsdb.notifier.NotifyAll(stateUpdate)
hsdb.notifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
}
return nil
@@ -729,6 +739,19 @@ func (hsdb *HSDatabase) enableRoutes(node *types.Node, routeStrs ...string) erro
stateUpdate, node.MachineKey.String())
}
// Send an update to the node itself to ensure it
// has an updated packetfilter allowing the new route
// if it is defined in the ACL.
selfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if selfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(
selfUpdate,
node.MachineKey)
}
return nil
}
@@ -856,7 +879,7 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
// checked everything.
started := time.Now()
expired := make([]*tailcfg.PeerChange, 0)
expiredNodes := make([]*types.Node, 0)
nodes, err := hsdb.listNodes()
if err != nil {
@@ -872,17 +895,13 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
// It will notify about all nodes that have been expired.
// It should only notify about expired nodes since _last check_.
node.Expiry.After(lastCheck) {
expired = append(expired, &tailcfg.PeerChange{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: node.Expiry,
})
expiredNodes = append(expiredNodes, &nodes[index])
now := time.Now()
// Do not use setNodeExpiry as that has a notifier hook, which
// can cause a deadlock; we are updating all changed nodes later
// and there is no point in notifying twice.
if err := hsdb.db.Model(nodes[index]).Updates(types.Node{
Expiry: &now,
if err := hsdb.db.Model(&nodes[index]).Updates(types.Node{
Expiry: &started,
}).Error; err != nil {
log.Error().
Err(err).
@@ -898,6 +917,15 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
}
}
expired := make([]*tailcfg.PeerChange, len(expiredNodes))
for idx, node := range expiredNodes {
expired[idx] = &tailcfg.PeerChange{
NodeID: tailcfg.NodeID(node.ID),
KeyExpiry: &started,
}
}
// Inform the peers of a node with a lightweight update.
stateUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: expired,
@@ -906,5 +934,16 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
hsdb.notifier.NotifyAll(stateUpdate)
}
// Inform the node itself that it has expired.
for _, node := range expiredNodes {
stateSelfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if stateSelfUpdate.Valid() {
hsdb.notifier.NotifyByMachineKey(stateSelfUpdate, node.MachineKey)
}
}
return started
}

View File

@@ -349,7 +349,7 @@ func (hsdb *HSDatabase) GetNodePrimaryRoutes(node *types.Node) (types.Routes, er
// SaveNodeRoutes takes a node and updates the database with
// the new routes.
// It returns a bool wheter an update should be sent as the
// It returns a bool whether an update should be sent as the
// saved route impacts nodes.
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) (bool, error) {
hsdb.mu.Lock()
@@ -585,6 +585,10 @@ func (hsdb *HSDatabase) failoverRoute(r *types.Route) ([]key.MachinePublic, erro
continue
}
if !route.Enabled {
continue
}
if hsdb.notifier.IsConnected(route.Node.MachineKey) {
newPrimary = &routes[idx]
break
@@ -639,13 +643,19 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
aclPolicy *policy.ACLPolicy,
node *types.Node,
) error {
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
if len(aclPolicy.AutoApprovers.ExitNode) == 0 && len(aclPolicy.AutoApprovers.Routes) == 0 {
// No autoapprovers configured
return nil
}
if len(node.IPAddresses) == 0 {
return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
// This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
return nil
}
hsdb.mu.Lock()
defer hsdb.mu.Unlock()
routes, err := hsdb.getNodeAdvertisedRoutes(node)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
log.Error().
@@ -657,6 +667,8 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
return err
}
log.Trace().Interface("routes", routes).Msg("routes for autoapproving")
approvedRoutes := types.Routes{}
for _, advertisedRoute := range routes {
@@ -676,6 +688,13 @@ func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
return err
}
log.Trace().
Str("node", node.Hostname).
Str("user", node.User.Name).
Strs("routeApprovers", routeApprovers).
Str("prefix", netip.Prefix(advertisedRoute.Prefix).String()).
Msg("looking up route for autoapproving")
for _, approvedAlias := range routeApprovers {
if approvedAlias == node.User.Name {
approvedRoutes = append(approvedRoutes, advertisedRoute)
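
The heart of the #1706 fix in failoverRoute above is the candidate-selection loop: a route only qualifies as the new primary if it is enabled and its node is connected. A self-contained sketch of that predicate with simplified stand-in types (not the actual headscale types):

package main

import "fmt"

// Route is a simplified stand-in for types.Route; Connected stands in for
// hsdb.notifier.IsConnected(route.Node.MachineKey).
type Route struct {
	ID        uint
	Enabled   bool
	Connected bool
}

// pickFailoverTarget mirrors the loop in failoverRoute: skip the failing
// route itself, skip disabled routes, and take the first connected candidate.
func pickFailoverTarget(failing Route, routes []Route) *Route {
	for idx, route := range routes {
		if route.ID == failing.ID {
			continue
		}
		if !route.Enabled {
			// The #1706 fix: never fail over to a disabled route.
			continue
		}
		if route.Connected {
			return &routes[idx]
		}
	}

	return nil // no viable candidate, keep the current primary
}

func main() {
	failing := Route{ID: 1, Enabled: true, Connected: false}
	routes := []Route{failing, {ID: 2, Enabled: false, Connected: true}}
	fmt.Println(pickFailoverTarget(failing, routes)) // <nil>: the only peer route is disabled
}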

View File

@@ -371,6 +371,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
@@ -382,6 +383,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
types.Route{
Model: gorm.Model{
@@ -392,6 +394,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[1],
},
IsPrimary: false,
Enabled: true,
},
},
want: []key.MachinePublic{
@@ -411,6 +414,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: false,
Enabled: true,
},
routes: types.Routes{
types.Route{
@@ -422,6 +426,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
types.Route{
Model: gorm.Model{
@@ -432,6 +437,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[1],
},
IsPrimary: false,
Enabled: true,
},
},
want: nil,
@@ -448,6 +454,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[1],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
@@ -459,6 +466,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: false,
Enabled: true,
},
types.Route{
Model: gorm.Model{
@@ -469,6 +477,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[1],
},
IsPrimary: true,
Enabled: true,
},
types.Route{
Model: gorm.Model{
@@ -479,6 +488,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[2],
},
IsPrimary: false,
Enabled: true,
},
},
want: []key.MachinePublic{
@@ -498,6 +508,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
@@ -509,6 +520,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
// Offline
types.Route{
@@ -520,6 +532,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[3],
},
IsPrimary: false,
Enabled: true,
},
},
want: nil,
@@ -536,6 +549,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
@@ -547,6 +561,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
// Offline
types.Route{
@@ -558,6 +573,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[3],
},
IsPrimary: false,
Enabled: true,
},
types.Route{
Model: gorm.Model{
@@ -568,6 +584,7 @@ func TestFailoverRoute(t *testing.T) {
MachineKey: machineKeys[1],
},
IsPrimary: true,
Enabled: true,
},
},
want: []key.MachinePublic{
@@ -576,6 +593,47 @@ func TestFailoverRoute(t *testing.T) {
},
wantErr: false,
},
{
name: "failover-primary-none-enabled",
failingRoute: types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
routes: types.Routes{
types.Route{
Model: gorm.Model{
ID: 1,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[0],
},
IsPrimary: true,
Enabled: true,
},
// not enabled
types.Route{
Model: gorm.Model{
ID: 2,
},
Prefix: ipp("10.0.0.0/24"),
Node: types.Node{
MachineKey: machineKeys[1],
},
IsPrimary: false,
Enabled: false,
},
},
want: nil,
wantErr: false,
},
}
for _, tt := range tests {

View File

@@ -84,6 +84,8 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
RegionID: d.cfg.ServerRegionID,
HostName: host,
DERPPort: port,
IPv4: d.cfg.IPv4,
IPv6: d.cfg.IPv6,
},
},
}
@@ -99,6 +101,7 @@ func (d *DERPServer) GenerateRegion() (tailcfg.DERPRegion, error) {
localDERPregion.Nodes[0].STUNPort = portSTUN
log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
log.Info().Caller().Msgf("DERP Nodes[0]: %+v", localDERPregion.Nodes[0])
return localDERPregion, nil
}
@@ -208,6 +211,7 @@ func DERPProbeHandler(
// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
// They have a cache, but not clear if that is really necessary at Headscale, uh, scale.
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
// The coordination server is included automatically, since the local DERP uses the same DNS name in d.serverURL.
func DERPBootstrapDNSHandler(
derpMap *tailcfg.DERPMap,
) func(http.ResponseWriter, *http.Request) {

View File

@@ -21,7 +21,6 @@ import (
"github.com/juanfont/headscale/hscontrol/util"
"github.com/klauspost/compress/zstd"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"golang.org/x/exp/maps"
"tailscale.com/envknob"
"tailscale.com/smallzstd"
@@ -279,6 +278,18 @@ func (m *Mapper) LiteMapResponse(
return nil, err
}
rules, sshPolicy, err := policy.GenerateFilterAndSSHRules(
pol,
node,
nodeMapToList(m.peers),
)
if err != nil {
return nil, err
}
resp.PacketFilter = policy.ReduceFilterRules(node, rules)
resp.SSHPolicy = sshPolicy
return m.marshalMapResponse(mapRequest, resp, node, mapRequest.Compress)
}
@@ -459,6 +470,8 @@ func (m *Mapper) marshalMapResponse(
switch {
case resp.Peers != nil && len(resp.Peers) > 0:
responseType = "full"
case resp.Peers == nil && resp.PeersChanged == nil && resp.PeersChangedPatch == nil:
responseType = "lite"
case resp.PeersChanged != nil && len(resp.PeersChanged) > 0:
responseType = "changed"
case resp.PeersChangedPatch != nil && len(resp.PeersChangedPatch) > 0:
@@ -593,15 +606,6 @@ func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
return ret
}
func filterExpiredAndNotReady(peers types.Nodes) types.Nodes {
return lo.Filter(peers, func(item *types.Node, index int) bool {
// Filter out nodes that are expired OR
// nodes that has no endpoints, this typically means they have
// registered, but are not configured.
return !item.IsExpired() || len(item.Endpoints) > 0
})
}
// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed.
func appendPeerChanges(
@@ -627,9 +631,6 @@ func appendPeerChanges(
return err
}
// Filter out peers that have expired.
changed = filterExpiredAndNotReady(changed)
// If there are filter rules present, see if there are any nodes that cannot
// access each other at all and remove them from the peers.
if len(rules) > 0 {

View File

@@ -95,6 +95,21 @@ func (n *Notifier) NotifyWithIgnore(update types.StateUpdate, ignore ...string)
}
}
func (n *Notifier) NotifyByMachineKey(update types.StateUpdate, mKey key.MachinePublic) {
log.Trace().Caller().Interface("type", update.Type).Msg("acquiring lock to notify")
defer log.Trace().
Caller().
Interface("type", update.Type).
Msg("releasing lock, finished notifing")
n.l.RLock()
defer n.l.RUnlock()
if c, ok := n.nodes[mKey.String()]; ok {
c <- update
}
}
func (n *Notifier) String() string {
n.l.RLock()
defer n.l.RUnlock()

View File

@@ -250,6 +250,21 @@ func ReduceFilterRules(node *types.Node, rules []tailcfg.FilterRule) []tailcfg.F
if node.IPAddresses.InIPSet(expanded) {
dests = append(dests, dest)
}
// If the node exposes routes, ensure they are not removed
// when the filters are reduced.
if node.Hostinfo != nil {
// TODO(kradalby): Evaluate if we should only keep
// the routes if the route is enabled. This will
// require database access in this part of the code.
if len(node.Hostinfo.RoutableIPs) > 0 {
for _, routableIP := range node.Hostinfo.RoutableIPs {
if expanded.ContainsPrefix(routableIP) {
dests = append(dests, dest)
}
}
}
}
}
if len(dests) > 0 {
@@ -674,14 +689,18 @@ func expandOwnersFromTag(
pol *ACLPolicy,
tag string,
) ([]string, error) {
noTagErr := fmt.Errorf(
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
ErrInvalidTag,
tag,
)
if pol == nil {
return []string{}, noTagErr
}
var owners []string
ows, ok := pol.TagOwners[tag]
if !ok {
return []string{}, fmt.Errorf(
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
ErrInvalidTag,
tag,
)
return []string{}, noTagErr
}
for _, owner := range ows {
if isGroup(owner) {

View File

@@ -1901,6 +1901,81 @@ func TestReduceFilterRules(t *testing.T) {
},
want: []tailcfg.FilterRule{},
},
{
name: "1604-subnet-routers-are-preserved",
pol: ACLPolicy{
Groups: Groups{
"group:admins": {"user1"},
},
ACLs: []ACL{
{
Action: "accept",
Sources: []string{"group:admins"},
Destinations: []string{"group:admins:*"},
},
{
Action: "accept",
Sources: []string{"group:admins"},
Destinations: []string{"10.33.0.0/16:*"},
},
},
},
node: &types.Node{
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.1"),
netip.MustParseAddr("fd7a:115c:a1e0::1"),
},
User: types.User{Name: "user1"},
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
netip.MustParsePrefix("10.33.0.0/16"),
},
},
},
peers: types.Nodes{
&types.Node{
IPAddresses: types.NodeAddresses{
netip.MustParseAddr("100.64.0.2"),
netip.MustParseAddr("fd7a:115c:a1e0::2"),
},
User: types.User{Name: "user1"},
},
},
want: []tailcfg.FilterRule{
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "100.64.0.1/32",
Ports: tailcfg.PortRangeAny,
},
{
IP: "fd7a:115c:a1e0::1/128",
Ports: tailcfg.PortRangeAny,
},
},
},
{
SrcIPs: []string{
"100.64.0.1/32",
"100.64.0.2/32",
"fd7a:115c:a1e0::1/128",
"fd7a:115c:a1e0::2/128",
},
DstPorts: []tailcfg.NetPortRange{
{
IP: "10.33.0.0/16",
Ports: tailcfg.PortRangeAny,
},
},
},
},
},
}
for _, tt := range tests {

View File

@@ -125,6 +125,14 @@ func (h *Headscale) handlePoll(
return
}
if h.ACLPolicy != nil {
// update routes with peer information
err = h.db.EnableAutoApprovedRoutes(h.ACLPolicy, node)
if err != nil {
logErr(err, "Error running auto approved routes")
}
}
}
// Services is mostly useful for discovery and not critical,
@@ -145,6 +153,8 @@ func (h *Headscale) handlePoll(
return
}
// Send an update to all peers to propagate the new routes
// available.
stateUpdate := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: types.Nodes{node},
@@ -156,6 +166,19 @@ func (h *Headscale) handlePoll(
node.MachineKey.String())
}
// Send an update to the node itself to ensure it
// has an updated packetfilter allowing the new route
// if it is defined in the ACL.
selfUpdate := types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: types.Nodes{node},
}
if selfUpdate.Valid() {
h.nodeNotifier.NotifyByMachineKey(
selfUpdate,
node.MachineKey)
}
return
}
}
@@ -370,6 +393,16 @@ func (h *Headscale) handlePoll(
var data []byte
var err error
// Ensure the node object is updated, for example, there
// might have been a hostinfo update in a sidechannel
// which contains data needed to generate a map response.
node, err = h.db.GetNodeByMachineKey(node.MachineKey)
if err != nil {
logErr(err, "Could not get machine from db")
return
}
switch update.Type {
case types.StateFullUpdate:
logInfo("Sending Full MapResponse")
@@ -397,6 +430,14 @@ func (h *Headscale) handlePoll(
case types.StatePeerRemoved:
logInfo("Sending PeerRemoved MapResponse")
data, err = mapp.PeerRemovedResponse(mapRequest, node, update.Removed)
case types.StateSelfUpdate:
if len(update.ChangeNodes) == 1 {
logInfo("Sending SelfUpdate MapResponse")
node = update.ChangeNodes[0]
data, err = mapp.LiteMapResponse(mapRequest, node, h.ACLPolicy)
} else {
logInfo("SelfUpdate contained too many nodes, this is likely a bug in the code, please report.")
}
case types.StateDERPUpdated:
logInfo("Sending DERPUpdate MapResponse")
data, err = mapp.DERPMapResponse(mapRequest, node, update.DERPMap)

View File

@@ -13,7 +13,7 @@ import (
)
const (
MinimumCapVersion tailcfg.CapabilityVersion = 36
MinimumCapVersion tailcfg.CapabilityVersion = 56
)
// NoisePollNetMapHandler takes care of /machine/:id/map using the Noise protocol

View File

@@ -91,6 +91,12 @@ const (
StatePeerChanged
StatePeerChangedPatch
StatePeerRemoved
// StateSelfUpdate is used to indicate that the node
// has changed in control, and the client needs to be
// informed.
// The updated node is inside the ChangeNodes field
// which should have a length of one.
StateSelfUpdate
StateDERPUpdated
)
@@ -142,6 +148,10 @@ func (su *StateUpdate) Valid() bool {
if su.Removed == nil {
panic("Mandatory field Removed is not set on StatePeerRemove update")
}
case StateSelfUpdate:
if su.ChangeNodes == nil || len(su.ChangeNodes) != 1 {
panic("Mandatory field ChangeNodes is not set for StateSelfUpdate or has more than one node")
}
case StateDERPUpdated:
if su.DERPMap == nil {
panic("Mandatory field DERPMap is not set on StateDERPUpdated update")

View File

@@ -107,16 +107,19 @@ type OIDCConfig struct {
}
type DERPConfig struct {
ServerEnabled bool
ServerRegionID int
ServerRegionCode string
ServerRegionName string
ServerPrivateKeyPath string
STUNAddr string
URLs []url.URL
Paths []string
AutoUpdate bool
UpdateFrequency time.Duration
ServerEnabled bool
AutomaticallyAddEmbeddedDerpRegion bool
ServerRegionID int
ServerRegionCode string
ServerRegionName string
ServerPrivateKeyPath string
STUNAddr string
URLs []url.URL
Paths []string
AutoUpdate bool
UpdateFrequency time.Duration
IPv4 string
IPv6 string
}
type LogTailConfig struct {
@@ -169,6 +172,7 @@ func LoadConfig(path string, isFile bool) error {
viper.SetDefault("derp.server.enabled", false)
viper.SetDefault("derp.server.stun.enabled", true)
viper.SetDefault("derp.server.automatically_add_embedded_derp_region", true)
viper.SetDefault("unix_socket", "/var/run/headscale/headscale.sock")
viper.SetDefault("unix_socket_permission", "0o770")
@@ -286,8 +290,14 @@ func GetDERPConfig() DERPConfig {
serverRegionCode := viper.GetString("derp.server.region_code")
serverRegionName := viper.GetString("derp.server.region_name")
stunAddr := viper.GetString("derp.server.stun_listen_addr")
privateKeyPath := util.AbsolutePathFromConfigPath(viper.GetString("derp.server.private_key_path"))
privateKeyPath := util.AbsolutePathFromConfigPath(
viper.GetString("derp.server.private_key_path"),
)
ipv4 := viper.GetString("derp.server.ipv4")
ipv6 := viper.GetString("derp.server.ipv6")
automaticallyAddEmbeddedDerpRegion := viper.GetBool(
"derp.server.automatically_add_embedded_derp_region",
)
if serverEnabled && stunAddr == "" {
log.Fatal().
Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true")
@@ -310,20 +320,28 @@ func GetDERPConfig() DERPConfig {
paths := viper.GetStringSlice("derp.paths")
if serverEnabled && !automaticallyAddEmbeddedDerpRegion && len(paths) == 0 {
log.Fatal().
Msg("Disabling derp.server.automatically_add_embedded_derp_region requires to configure the derp server in derp.paths")
}
autoUpdate := viper.GetBool("derp.auto_update_enabled")
updateFrequency := viper.GetDuration("derp.update_frequency")
return DERPConfig{
ServerEnabled: serverEnabled,
ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName,
ServerPrivateKeyPath: privateKeyPath,
STUNAddr: stunAddr,
URLs: urls,
Paths: paths,
AutoUpdate: autoUpdate,
UpdateFrequency: updateFrequency,
ServerEnabled: serverEnabled,
ServerRegionID: serverRegionID,
ServerRegionCode: serverRegionCode,
ServerRegionName: serverRegionName,
ServerPrivateKeyPath: privateKeyPath,
STUNAddr: stunAddr,
URLs: urls,
Paths: paths,
AutoUpdate: autoUpdate,
UpdateFrequency: updateFrequency,
IPv4: ipv4,
IPv6: ipv6,
AutomaticallyAddEmbeddedDerpRegion: automaticallyAddEmbeddedDerpRegion,
}
}
@@ -572,7 +590,7 @@ func GetHeadscaleConfig() (*Config, error) {
if err != nil {
return nil, err
}
oidcClientSecret = string(secretBytes)
oidcClientSecret = strings.TrimSpace(string(secretBytes))
}
return &Config{

View File

@@ -383,7 +383,7 @@ func (node *Node) GetFQDN(dnsConfig *tailcfg.DNSConfig, baseDomain string) (stri
// inform peers about smaller changes to the node.
// When a field is added to this function, remember to also add it to:
// - node.ApplyPeerChange
// - logTracePeerChange in poll.go
// - logTracePeerChange in poll.go.
func (node *Node) PeerChangeFromMapRequest(req tailcfg.MapRequest) tailcfg.PeerChange {
ret := tailcfg.PeerChange{
NodeID: tailcfg.NodeID(node.ID),

View File

@@ -8,6 +8,7 @@ import (
"time"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
@@ -665,6 +666,119 @@ func TestNodeTagCommand(t *testing.T) {
)
}
func TestNodeAdvertiseTagNoACLCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assertNoErr(t, err)
defer scenario.Shutdown()
spec := map[string]int{
"user1": 1,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:test"})}, hsic.WithTestName("cliadvtags"))
assertNoErr(t, err)
headscale, err := scenario.Headscale()
assertNoErr(t, err)
// Test listing all nodes after they have been added
resultMachines := make([]*v1.Node, spec["user1"])
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--tags",
"--output", "json",
},
&resultMachines,
)
assert.Nil(t, err)
found := false
for _, node := range resultMachines {
if node.GetInvalidTags() != nil {
for _, tag := range node.GetInvalidTags() {
if tag == "tag:test" {
found = true
}
}
}
}
assert.Equal(
t,
true,
found,
"should not find a node with the tag 'tag:test' in the list of nodes",
)
}
func TestNodeAdvertiseTagWithACLCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
scenario, err := NewScenario()
assertNoErr(t, err)
defer scenario.Shutdown()
spec := map[string]int{
"user1": 1,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:exists"})}, hsic.WithTestName("cliadvtags"), hsic.WithACLPolicy(
&policy.ACLPolicy{
ACLs: []policy.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
TagOwners: map[string][]string{
"tag:exists": {"user1"},
},
},
))
assertNoErr(t, err)
headscale, err := scenario.Headscale()
assertNoErr(t, err)
// Test listing all nodes after they have been added
resultMachines := make([]*v1.Node, spec["user1"])
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"nodes",
"list",
"--tags",
"--output", "json",
},
&resultMachines,
)
assert.Nil(t, err)
found := false
for _, node := range resultMachines {
if node.GetValidTags() != nil {
for _, tag := range node.GetValidTags() {
if tag == "tag:exists" {
found = true
}
}
}
}
assert.Equal(
t,
true,
found,
"should not find a node with the tag 'tag:exists' in the list of nodes",
)
}
func TestNodeCommand(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

View File

@@ -320,7 +320,6 @@ func TestTaildrop(t *testing.T) {
if err != nil {
t.Fatalf("failed to install curl on %s, err: %s", client.Hostname(), err)
}
}
curlCommand := []string{"curl", "--unix-socket", "/var/run/tailscale/tailscaled.sock", "http://local-tailscaled.sock/localapi/v0/file-targets"}
err = retry(10, 1*time.Second, func() error {
@@ -537,7 +536,7 @@ func TestExpireNode(t *testing.T) {
assertNoErr(t, err)
// Assert that we have the original count - self
assert.Len(t, status.Peers(), len(MustTestVersions)-1)
assert.Len(t, status.Peers(), spec["user1"]-1)
}
headscale, err := scenario.Headscale()
@@ -560,7 +559,7 @@ func TestExpireNode(t *testing.T) {
t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())
time.Sleep(30 * time.Second)
time.Sleep(2 * time.Minute)
now := time.Now()
@@ -572,21 +571,33 @@ func TestExpireNode(t *testing.T) {
if client.Hostname() != node.GetName() {
t.Logf("available peers of %s: %v", client.Hostname(), status.Peers())
// In addition to marking nodes expired, we filter them out during the map response
// this check ensures that the node is either not present, or that it is expired
// if it is in the map response.
// Ensures that the node is present, and that it is expired.
if peerStatus, ok := status.Peer[expiredNodeKey]; ok {
assertNotNil(t, peerStatus.Expired)
assert.Truef(t, peerStatus.KeyExpiry.Before(now), "node %s should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry)
assert.Truef(t, peerStatus.Expired, "node %s should be expired, expired is %v", peerStatus.HostName, peerStatus.Expired)
assert.NotNil(t, peerStatus.KeyExpiry)
t.Logf("node %q should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry)
if peerStatus.KeyExpiry != nil {
assert.Truef(t, peerStatus.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", peerStatus.HostName, now.String(), peerStatus.KeyExpiry)
}
assert.Truef(t, peerStatus.Expired, "node %q should be expired, expired is %v", peerStatus.HostName, peerStatus.Expired)
_, stderr, _ := client.Execute([]string{"tailscale", "ping", node.GetName()})
if !strings.Contains(stderr, "node key has expired") {
t.Errorf("expected to be unable to ping expired host %q from %q", node.GetName(), client.Hostname())
}
} else {
t.Errorf("failed to find node %q with nodekey (%s) in mapresponse, should be present even if it is expired", node.GetName(), expiredNodeKey)
}
} else {
if status.Self.KeyExpiry != nil {
assert.Truef(t, status.Self.KeyExpiry.Before(now), "node %q should have a key expire before %s, was %s", status.Self.HostName, now.String(), status.Self.KeyExpiry)
}
// TODO(kradalby): We do not propagate expiry correctly, nodes should be aware
// of their status, and this should be sent directly to the node when it is
// expired. This needs a notifier that goes directly to the node (currently we only do peers),
// so fix this in a follow-up PR.
// } else {
// assert.True(t, status.Self.Expired)
// NeedsLogin means that the node has understood that it is no longer
// valid.
assert.Equal(t, "NeedsLogin", status.BackendState)
}
}
}

View File

@@ -9,10 +9,15 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
"github.com/stretchr/testify/assert"
"tailscale.com/types/ipproto"
"tailscale.com/wgengine/filter"
)
// This test is both testing the routes command and the propagation of
@@ -778,3 +783,413 @@ func TestHASubnetRouterFailover(t *testing.T) {
)
}
}
func TestEnableDisableAutoApprovedRoute(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
expectedRoutes := "172.0.0.0/24"
user := "enable-disable-routing"
scenario, err := NewScenario()
assertNoErrf(t, "failed to create scenario: %s", err)
defer scenario.Shutdown()
spec := map[string]int{
user: 1,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{tsic.WithTags([]string{"tag:approve"})}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
&policy.ACLPolicy{
ACLs: []policy.ACL{
{
Action: "accept",
Sources: []string{"*"},
Destinations: []string{"*:*"},
},
},
TagOwners: map[string][]string{
"tag:approve": {user},
},
AutoApprovers: policy.AutoApprovers{
Routes: map[string][]string{
expectedRoutes: {"tag:approve"},
},
},
},
))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
subRouter1 := allClients[0]
// Initially advertise route
command := []string{
"tailscale",
"set",
"--advertise-routes=" + expectedRoutes,
}
_, _, err = subRouter1.Execute(command)
assertNoErrf(t, "failed to advertise route: %s", err)
time.Sleep(10 * time.Second)
var routes []*v1.Route
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"routes",
"list",
"--output",
"json",
},
&routes,
)
assertNoErr(t, err)
assert.Len(t, routes, 1)
// All routes should be auto approved and enabled
assert.Equal(t, true, routes[0].GetAdvertised())
assert.Equal(t, true, routes[0].GetEnabled())
assert.Equal(t, true, routes[0].GetIsPrimary())
// Stop advertising route
command = []string{
"tailscale",
"set",
"--advertise-routes=",
}
_, _, err = subRouter1.Execute(command)
assertNoErrf(t, "failed to remove advertised route: %s", err)
time.Sleep(10 * time.Second)
var notAdvertisedRoutes []*v1.Route
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"routes",
"list",
"--output",
"json",
},
&notAdvertisedRoutes,
)
assertNoErr(t, err)
assert.Len(t, notAdvertisedRoutes, 1)
// Route is no longer advertised
assert.Equal(t, false, notAdvertisedRoutes[0].GetAdvertised())
assert.Equal(t, false, notAdvertisedRoutes[0].GetEnabled())
assert.Equal(t, true, notAdvertisedRoutes[0].GetIsPrimary())
// Advertise route again
command = []string{
"tailscale",
"set",
"--advertise-routes=" + expectedRoutes,
}
_, _, err = subRouter1.Execute(command)
assertNoErrf(t, "failed to advertise route: %s", err)
time.Sleep(10 * time.Second)
var reAdvertisedRoutes []*v1.Route
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"routes",
"list",
"--output",
"json",
},
&reAdvertisedRoutes,
)
assertNoErr(t, err)
assert.Len(t, reAdvertisedRoutes, 1)
// All routes should be auto approved and enabled
assert.Equal(t, true, reAdvertisedRoutes[0].GetAdvertised())
assert.Equal(t, true, reAdvertisedRoutes[0].GetEnabled())
assert.Equal(t, true, reAdvertisedRoutes[0].GetIsPrimary())
}
// TestSubnetRouteACL verifies that Subnet routes are distributed
// as expected when ACLs are activated.
// It reproduces the issue from
// https://github.com/juanfont/headscale/issues/1604
func TestSubnetRouteACL(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
user := "subnet-route-acl"
scenario, err := NewScenario()
assertNoErrf(t, "failed to create scenario: %s", err)
defer scenario.Shutdown()
spec := map[string]int{
user: 2,
}
err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("clienableroute"), hsic.WithACLPolicy(
&policy.ACLPolicy{
Groups: policy.Groups{
"group:admins": {user},
},
ACLs: []policy.ACL{
{
Action: "accept",
Sources: []string{"group:admins"},
Destinations: []string{"group:admins:*"},
},
{
Action: "accept",
Sources: []string{"group:admins"},
Destinations: []string{"10.33.0.0/16:*"},
},
// {
// Action: "accept",
// Sources: []string{"group:admins"},
// Destinations: []string{"0.0.0.0/0:*"},
// },
},
},
))
assertNoErrHeadscaleEnv(t, err)
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
headscale, err := scenario.Headscale()
assertNoErrGetHeadscale(t, err)
expectedRoutes := map[string]string{
"1": "10.33.0.0/16",
}
// Sort nodes by ID
sort.SliceStable(allClients, func(i, j int) bool {
statusI, err := allClients[i].Status()
if err != nil {
return false
}
statusJ, err := allClients[j].Status()
if err != nil {
return false
}
return statusI.Self.ID < statusJ.Self.ID
})
subRouter1 := allClients[0]
client := allClients[1]
// advertise HA route on node 1 and 2
// ID 1 will be primary
// ID 2 will be secondary
for _, client := range allClients {
status, err := client.Status()
assertNoErr(t, err)
if route, ok := expectedRoutes[string(status.Self.ID)]; ok {
command := []string{
"tailscale",
"set",
"--advertise-routes=" + route,
}
_, _, err = client.Execute(command)
assertNoErrf(t, "failed to advertise route: %s", err)
}
}
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
var routes []*v1.Route
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"routes",
"list",
"--output",
"json",
},
&routes,
)
assertNoErr(t, err)
assert.Len(t, routes, 1)
for _, route := range routes {
assert.Equal(t, true, route.GetAdvertised())
assert.Equal(t, false, route.GetEnabled())
assert.Equal(t, false, route.GetIsPrimary())
}
// Verify that no routes have been sent to the client,
// as they are not yet enabled.
for _, client := range allClients {
status, err := client.Status()
assertNoErr(t, err)
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]
assert.Nil(t, peerStatus.PrimaryRoutes)
}
}
// Enable all routes
for _, route := range routes {
_, err = headscale.Execute(
[]string{
"headscale",
"routes",
"enable",
"--route",
strconv.Itoa(int(route.GetId())),
})
assertNoErr(t, err)
}
time.Sleep(5 * time.Second)
var enablingRoutes []*v1.Route
err = executeAndUnmarshal(
headscale,
[]string{
"headscale",
"routes",
"list",
"--output",
"json",
},
&enablingRoutes,
)
assertNoErr(t, err)
assert.Len(t, enablingRoutes, 1)
// Node 1 has active route
assert.Equal(t, true, enablingRoutes[0].GetAdvertised())
assert.Equal(t, true, enablingRoutes[0].GetEnabled())
assert.Equal(t, true, enablingRoutes[0].GetIsPrimary())
// Verify that the client has routes from the primary machine
srs1, _ := subRouter1.Status()
clientStatus, err := client.Status()
assertNoErr(t, err)
srs1PeerStatus := clientStatus.Peer[srs1.Self.PublicKey]
assertNotNil(t, srs1PeerStatus.PrimaryRoutes)
t.Logf("subnet1 has following routes: %v", srs1PeerStatus.PrimaryRoutes.AsSlice())
assert.Len(t, srs1PeerStatus.PrimaryRoutes.AsSlice(), 1)
assert.Contains(
t,
srs1PeerStatus.PrimaryRoutes.AsSlice(),
netip.MustParsePrefix(expectedRoutes[string(srs1.Self.ID)]),
)
clientNm, err := client.Netmap()
assertNoErr(t, err)
wantClientFilter := []filter.Match{
{
IPProto: []ipproto.Proto{
ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
},
Srcs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("100.64.0.2/32"),
netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
},
Dsts: []filter.NetPortRange{
{
Net: netip.MustParsePrefix("100.64.0.2/32"),
Ports: filter.PortRange{0, 0xffff},
},
{
Net: netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
Ports: filter.PortRange{0, 0xffff},
},
},
Caps: []filter.CapMatch{},
},
}
if diff := cmp.Diff(wantClientFilter, clientNm.PacketFilter, util.PrefixComparer); diff != "" {
t.Errorf("Client (%s) filter, unexpected result (-want +got):\n%s", client.Hostname(), diff)
}
subnetNm, err := subRouter1.Netmap()
assertNoErr(t, err)
wantSubnetFilter := []filter.Match{
{
IPProto: []ipproto.Proto{
ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
},
Srcs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("100.64.0.2/32"),
netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
},
Dsts: []filter.NetPortRange{
{
Net: netip.MustParsePrefix("100.64.0.1/32"),
Ports: filter.PortRange{0, 0xffff},
},
{
Net: netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
Ports: filter.PortRange{0, 0xffff},
},
},
Caps: []filter.CapMatch{},
},
{
IPProto: []ipproto.Proto{
ipproto.TCP, ipproto.UDP, ipproto.ICMPv4, ipproto.ICMPv6,
},
Srcs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
netip.MustParsePrefix("100.64.0.2/32"),
netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
netip.MustParsePrefix("fd7a:115c:a1e0::2/128"),
},
Dsts: []filter.NetPortRange{
{
Net: netip.MustParsePrefix("10.33.0.0/16"),
Ports: filter.PortRange{0, 0xffff},
},
},
Caps: []filter.CapMatch{},
},
}
if diff := cmp.Diff(wantSubnetFilter, subnetNm.PacketFilter, util.PrefixComparer); diff != "" {
t.Errorf("Subnet (%s) filter, unexpected result (-want +got):\n%s", subRouter1.Hostname(), diff)
}
}

View File

@@ -13,8 +13,10 @@ run_tests() {
for ((i = 1; i <= num_tests; i++)); do
docker network prune -f >/dev/null 2>&1
docker rm headscale-test-suite || true
docker kill "$(docker ps -q)" || true
docker rm headscale-test-suite >/dev/null 2>&1 || true
docker kill "$(docker ps -q)" >/dev/null 2>&1 || true
echo "Run $i"
start=$(date +%s)
docker run \

View File

@@ -47,19 +47,19 @@ var (
tailscaleVersions2021 = map[string]bool{
"head": true,
"unstable": true,
"1.56": true, // CapVer: 82
"1.54": true, // CapVer: 79
"1.52": true, // CapVer: 79
"1.50": true, // CapVer: 74
"1.48": true, // CapVer: 68
"1.46": true, // CapVer: 65
"1.44": true, // CapVer: 63
"1.42": true, // CapVer: 61
"1.40": true, // CapVer: 61
"1.38": true, // CapVer: 58
"1.36": true, // CapVer: 56
"1.34": true, // CapVer: 51
"1.32": true, // Oldest supported version, CapVer: 46
"1.56": true, // CapVer: 82
"1.54": true, // CapVer: 79
"1.52": true, // CapVer: 79
"1.50": true, // CapVer: 74
"1.48": true, // CapVer: 68
"1.46": true, // CapVer: 65
"1.44": true, // CapVer: 63
"1.42": true, // CapVer: 61
"1.40": true, // CapVer: 61
"1.38": true, // CapVer: 58
"1.36": true, // Oldest supported version, CapVer: 56
"1.34": false, // CapVer: 51
"1.32": false, // CapVer: 46
"1.30": false,
}

View File

@@ -142,7 +142,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
})
t.Run("create-tailscale", func(t *testing.T) {
err := scenario.CreateTailscaleNodesInUser(user, "1.30.2", count)
err := scenario.CreateTailscaleNodesInUser(user, "unstable", count)
if err != nil {
t.Fatalf("failed to add tailscale nodes: %s", err)
}

View File

@@ -7,6 +7,7 @@ import (
"github.com/juanfont/headscale/integration/dockertestutil"
"github.com/juanfont/headscale/integration/tsic"
"tailscale.com/ipn/ipnstate"
"tailscale.com/types/netmap"
)
// nolint
@@ -26,6 +27,7 @@ type TailscaleClient interface {
IPs() ([]netip.Addr, error)
FQDN() (string, error)
Status() (*ipnstate.Status, error)
Netmap() (*netmap.NetworkMap, error)
WaitForNeedsLogin() error
WaitForRunning() error
WaitForPeers(expected int) error
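
The Netmap() accessor added here is what makes packet-filter assertions like the ones in TestSubnetRouteACL possible. Outside the container harness the same mechanism works against a local tailscaled; a sketch assuming a tailscale CLI (1.56.1 or newer, per the comment below) on PATH:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"

	"tailscale.com/types/netmap"
)

func main() {
	// Same approach as TailscaleInContainer.Netmap below: dump the netmap
	// as JSON and decode it into netmap.NetworkMap.
	out, err := exec.Command("tailscale", "debug", "netmap").Output()
	if err != nil {
		panic(err)
	}

	var nm netmap.NetworkMap
	if err := json.Unmarshal(out, &nm); err != nil {
		panic(err)
	}

	fmt.Printf("%d peers, %d packet filter rules\n", len(nm.Peers), len(nm.PacketFilter))
}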

View File

@@ -17,6 +17,7 @@ import (
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"tailscale.com/ipn/ipnstate"
"tailscale.com/types/netmap"
)
const (
@@ -519,6 +520,30 @@ func (t *TailscaleInContainer) Status() (*ipnstate.Status, error) {
return &status, err
}
// Netmap returns the current Netmap (netmap.NetworkMap) of the Tailscale instance.
// Only works with Tailscale 1.56.1 and newer.
func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
command := []string{
"tailscale",
"debug",
"netmap",
}
result, stderr, err := t.Execute(command)
if err != nil {
fmt.Printf("stderr: %s\n", stderr)
return nil, fmt.Errorf("failed to execute tailscale debug netmap command: %w", err)
}
var nm netmap.NetworkMap
err = json.Unmarshal([]byte(result), &nm)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal tailscale netmap: %w", err)
}
return &nm, err
}
// FQDN returns the FQDN as a string of the Tailscale instance.
func (t *TailscaleInContainer) FQDN() (string, error) {
if t.fqdn != "" {