Mirror of https://github.com/juanfont/headscale.git, synced 2025-12-15 17:51:48 +00:00

Compare commits: copilot/de… → copilot/in…

4 commits: 1f4b645d5b, 4fa1f4baa3, e0107024e8, a55cdc2636
.github/workflows/test-integration.yaml (vendored)
@@ -62,7 +62,6 @@ jobs:
           - TestDERPServerScenario
           - TestDERPServerWebsocketScenario
           - TestPingAllByIP
-          - TestPingAllByIPRandomClientPort
           - TestPingAllByIPPublicDERP
           - TestEphemeral
           - TestEphemeralInAlternateTimezone
hscontrol/mapper/builder.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/juanfont/headscale/hscontrol/policy"
+	"github.com/juanfont/headscale/hscontrol/policy/matcher"
 	"github.com/juanfont/headscale/hscontrol/types"
 	"tailscale.com/tailcfg"
 	"tailscale.com/types/views"
@@ -67,6 +68,33 @@ func (b *MapResponseBuilder) WithCapabilityVersion(capVer tailcfg.CapabilityVers
 	return b
 }
 
+// buildRouteFilterFunc creates a route filter function that includes both primary and exit routes.
+// It filters routes based on ACL policy to ensure only authorized routes are visible.
+func (b *MapResponseBuilder) buildRouteFilterFunc(
+	viewingNode types.NodeView,
+	matchers []matcher.Match,
+) routeFilterFunc {
+	return func(id types.NodeID) []netip.Prefix {
+		// Get the peer node to check for exit routes
+		peer, ok := b.mapper.state.GetNodeByID(id)
+		if !ok {
+			return nil
+		}
+
+		// Start with primary routes (subnet routes, but not exit routes)
+		routes := policy.ReduceRoutes(viewingNode, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
+
+		// Also filter exit routes through policy.
+		// Only add exit routes if the viewing node has permission to use them.
+		if exitRoutes := peer.ExitRoutes(); len(exitRoutes) > 0 {
+			filteredExitRoutes := policy.ReduceRoutes(viewingNode, exitRoutes, matchers)
+			routes = append(routes, filteredExitRoutes...)
+		}
+
+		return routes
+	}
+}
+
 // WithSelfNode adds the requesting node to the response.
 func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
 	nv, ok := b.mapper.state.GetNodeByID(b.nodeID)
@@ -78,9 +106,7 @@ func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
 	_, matchers := b.mapper.state.Filter()
 	tailnode, err := tailNode(
 		nv, b.capVer, b.mapper.state,
-		func(id types.NodeID) []netip.Prefix {
-			return policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
-		},
+		b.buildRouteFilterFunc(nv, matchers),
 		b.mapper.cfg)
 	if err != nil {
 		b.addError(err)
@@ -253,9 +279,7 @@ func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) (
 
 	tailPeers, err := tailNodes(
 		changedViews, b.capVer, b.mapper.state,
-		func(id types.NodeID) []netip.Prefix {
-			return policy.ReduceRoutes(node, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
-		},
+		b.buildRouteFilterFunc(node, matchers),
 		b.mapper.cfg)
 	if err != nil {
 		return nil, err
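Previously, tailNode appended a peer's exit routes (0.0.0.0/0 and ::/0) to AllowedIPs unconditionally; buildRouteFilterFunc now pushes them through the same ACL reduction as subnet routes. Below is a minimal, self-contained sketch of that idea, in which reduceRoutes and the allow-map are hypothetical stand-ins for policy.ReduceRoutes and the ACL matchers, not headscale's actual API:

package main

import (
	"fmt"
	"net/netip"
)

// reduceRoutes keeps only the prefixes the viewing node is allowed to reach.
// It stands in for policy.ReduceRoutes; the map stands in for ACL matchers.
func reduceRoutes(acl map[netip.Prefix]bool, routes []netip.Prefix) []netip.Prefix {
	var out []netip.Prefix
	for _, r := range routes {
		if acl[r] {
			out = append(out, r)
		}
	}
	return out
}

func main() {
	primary := []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")}
	exitRoutes := []netip.Prefix{
		netip.MustParsePrefix("0.0.0.0/0"),
		netip.MustParsePrefix("::/0"),
	}

	// A viewer whose ACL grants the subnet route but not autogroup:internet.
	acl := map[netip.Prefix]bool{netip.MustParsePrefix("192.168.0.0/24"): true}

	// Old behaviour: exit routes were appended unconditionally after filtering.
	before := append(reduceRoutes(acl, primary), exitRoutes...)
	// New behaviour: exit routes pass through the same policy filter.
	after := append(reduceRoutes(acl, primary), reduceRoutes(acl, exitRoutes)...)

	fmt.Println("before:", before) // [192.168.0.0/24 0.0.0.0/0 ::/0]
	fmt.Println("after: ", after)  // [192.168.0.0/24]
}

A viewer with no matching ACL entry for an exit route simply never receives it, which is the visibility behaviour the integration test at the end of this diff asserts.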
hscontrol/mapper/tail.go
@@ -88,9 +88,9 @@ func tailNode(
 	}
 	tags = lo.Uniq(tags)
 
+	// Get filtered routes (includes both primary routes and exit routes if allowed by policy)
 	routes := primaryRouteFunc(node.ID())
 	allowed := append(addrs, routes...)
-	allowed = append(allowed, node.ExitRoutes()...)
 	tsaddr.SortPrefixes(allowed)
 
 	tNode := tailcfg.Node{
hscontrol/mapper/tail_test.go
@@ -137,10 +137,8 @@ func TestTailNode(t *testing.T) {
 			),
 			Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
 			AllowedIPs: []netip.Prefix{
-				tsaddr.AllIPv4(),
 				netip.MustParsePrefix("192.168.0.0/24"),
 				netip.MustParsePrefix("100.64.0.1/32"),
-				tsaddr.AllIPv6(),
 			},
 			PrimaryRoutes: []netip.Prefix{
 				netip.MustParsePrefix("192.168.0.0/24"),
integration/general_test.go
@@ -86,108 +86,6 @@ func TestPingAllByIP(t *testing.T) {
 	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
 }
 
-// TestPingAllByIPRandomClientPort is a variant of TestPingAllByIP that validates
-// direct connections between nodes with randomize_client_port enabled. This test
-// ensures that nodes can establish direct peer-to-peer connections without relying
-// on DERP relay servers, and that the randomize_client_port feature works correctly.
-func TestPingAllByIPRandomClientPort(t *testing.T) {
-	IntegrationSkip(t)
-
-	spec := ScenarioSpec{
-		NodesPerUser: len(MustTestVersions),
-		Users:        []string{"user1", "user2"},
-		MaxWait:      dockertestMaxWait(),
-	}
-
-	scenario, err := NewScenario(spec)
-	require.NoError(t, err)
-	defer scenario.ShutdownAssertNoPanics(t)
-
-	err = scenario.CreateHeadscaleEnv(
-		[]tsic.Option{},
-		hsic.WithTestName("pingdirect"),
-		hsic.WithEmbeddedDERPServerOnly(),
-		hsic.WithTLS(),
-		hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
-		hsic.WithConfigEnv(map[string]string{
-			"HEADSCALE_RANDOMIZE_CLIENT_PORT": "true",
-		}),
-	)
-	requireNoErrHeadscaleEnv(t, err)
-
-	allClients, err := scenario.ListTailscaleClients()
-	requireNoErrListClients(t, err)
-
-	allIps, err := scenario.ListTailscaleClientsIPs()
-	requireNoErrListClientIPs(t, err)
-
-	err = scenario.WaitForTailscaleSync()
-	requireNoErrSync(t, err)
-
-	hs, err := scenario.Headscale()
-	require.NoError(t, err)
-
-	// Extract node IDs for validation
-	expectedNodes := make([]types.NodeID, 0, len(allClients))
-	for _, client := range allClients {
-		status := client.MustStatus()
-		nodeID, err := strconv.ParseUint(string(status.Self.ID), 10, 64)
-		require.NoError(t, err, "failed to parse node ID")
-		expectedNodes = append(expectedNodes, types.NodeID(nodeID))
-	}
-	requireAllClientsOnline(t, hs, expectedNodes, true, "all clients should be online across all systems", 30*time.Second)
-
-	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
-		return x.String()
-	})
-
-	// Perform pings to establish connections
-	success := pingAllHelper(t, allClients, allAddrs)
-	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
-
-	// Validate that connections are direct (not relayed through DERP).
-	// We check that each client has direct connections to its peers.
-	t.Logf("Validating direct connections...")
-	assert.EventuallyWithT(t, func(ct *assert.CollectT) {
-		for _, client := range allClients {
-			status, err := client.Status()
-			assert.NoError(ct, err, "failed to get status for client %s", client.Hostname())
-			if err != nil {
-				continue
-			}
-
-			// Check each peer to see if we have a direct connection
-			directCount := 0
-			relayedCount := 0
-
-			for _, peerKey := range status.Peers() {
-				peerStatus := status.Peer[peerKey]
-
-				// CurAddr indicates the current address being used to communicate with this peer.
-				// Direct connections have CurAddr set to an actual IP:port.
-				// DERP-relayed connections either have no CurAddr or it contains the DERP magic IP.
-				if peerStatus.CurAddr != "" && !strings.Contains(peerStatus.CurAddr, "127.3.3.40") {
-					// This is a direct connection: CurAddr contains the actual peer IP:port
-					directCount++
-					t.Logf("Client %s -> Peer %s: DIRECT connection via %s (relay: %s)",
-						client.Hostname(), peerStatus.HostName, peerStatus.CurAddr, peerStatus.Relay)
-				} else {
-					// This is a relayed connection through DERP
-					relayedCount++
-					t.Logf("Client %s -> Peer %s: RELAYED connection (CurAddr: %s, relay: %s)",
-						client.Hostname(), peerStatus.HostName, peerStatus.CurAddr, peerStatus.Relay)
-				}
-			}
-
-			// Assert that we have at least some direct connections.
-			// In a local Docker network, we should be able to establish direct connections.
-			assert.Greater(ct, directCount, 0,
-				"Client %s should have at least one direct connection, got %d direct and %d relayed",
-				client.Hostname(), directCount, relayedCount)
-		}
-	}, 60*time.Second, 2*time.Second, "validating direct connections between peers")
-}
-
 func TestPingAllByIPPublicDERP(t *testing.T) {
 	IntegrationSkip(t)
 
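The direct-connection validation in the removed test hinges on one heuristic: a peer's CurAddr is empty (or contains the DERP magic IP, 127.3.3.40) for relayed connections and holds a real IP:port for direct ones. The same check as a standalone helper, a sketch assuming tailscale.com/ipn/ipnstate, whose PeerStatus exposes CurAddr:

package connstate

import (
	"strings"

	"tailscale.com/ipn/ipnstate"
)

// isDirect reports whether a peer connection is direct rather than
// DERP-relayed: a non-empty CurAddr that is not the DERP magic IP
// (127.3.3.40) means traffic flows over a real IP:port path.
func isDirect(ps *ipnstate.PeerStatus) bool {
	return ps.CurAddr != "" && !strings.Contains(ps.CurAddr, "127.3.3.40")
}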
integration/route_test.go
@@ -3042,3 +3042,154 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
 		assertTracerouteViaIPWithCollect(c, tr, ip)
 	}, 20*time.Second, 200*time.Millisecond, "Verifying traceroute goes through router")
 }
+
+// TestExitNodeVisibilityWithACL tests that exit nodes are only visible
+// to nodes that have permission to use them according to ACL policy.
+// This is a regression test for issue #2788.
+func TestExitNodeVisibilityWithACL(t *testing.T) {
+	IntegrationSkip(t)
+
+	spec := ScenarioSpec{
+		NodesPerUser: 1,
+		Users:        []string{"mobile", "server", "exit-owner"},
+	}
+
+	scenario, err := NewScenario(spec)
+	require.NoErrorf(t, err, "failed to create scenario: %s", err)
+	defer scenario.ShutdownAssertNoPanics(t)
+
+	// Policy that allows:
+	// - mobile can communicate with server on port 80
+	// - mobile does NOT have autogroup:internet, so should NOT see the exit node
+	policy := `
+{
+	"hosts": {
+		"mobile": "100.64.0.1/32",
+		"server": "100.64.0.2/32",
+		"exit": "100.64.0.3/32"
+	},
+	"acls": [
+		{
+			"action": "accept",
+			"src": ["mobile"],
+			"dst": ["server:80"]
+		}
+	]
+}
+`
+
+	err = scenario.CreateHeadscaleEnv(
+		[]tsic.Option{},
+		hsic.WithTestName("exitnodeacl"),
+		hsic.WithConfigEnv(map[string]string{
+			"HEADSCALE_POLICY_MODE": "file",
+			"HEADSCALE_POLICY_PATH": "/etc/headscale/policy.json",
+		}),
+		hsic.WithFileInContainer("/etc/headscale/policy.json", []byte(policy)),
+	)
+	requireNoErrHeadscaleEnv(t, err)
+
+	allClients, err := scenario.ListTailscaleClients()
+	requireNoErrListClients(t, err)
+	require.Len(t, allClients, 3)
+
+	err = scenario.WaitForTailscaleSync()
+	requireNoErrSync(t, err)
+
+	headscale, err := scenario.Headscale()
+	requireNoErrGetHeadscale(t, err)
+
+	// Find the clients
+	var mobileClient, serverClient, exitClient TailscaleClient
+	for _, client := range allClients {
+		status := client.MustStatus()
+		switch status.User[status.Self.UserID].LoginName {
+		case "mobile@test.no":
+			mobileClient = client
+		case "server@test.no":
+			serverClient = client
+		case "exit-owner@test.no":
+			exitClient = client
+		}
+	}
+	require.NotNil(t, mobileClient, "mobile client not found")
+	require.NotNil(t, serverClient, "server client not found")
+	require.NotNil(t, exitClient, "exit client not found")
+
+	// Advertise exit node from the exit-owner node
+	_, _, err = exitClient.Execute([]string{
+		"tailscale",
+		"set",
+		"--advertise-exit-node",
+	})
+	require.NoErrorf(t, err, "failed to advertise exit node: %s", err)
+
+	// Wait for the exit node to be registered
+	var nodes []*v1.Node
+	var exitNode *v1.Node
+	exitStatus := exitClient.MustStatus()
+
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		nodes, err = headscale.ListNodes()
+		assert.NoError(c, err)
+		assert.Len(c, nodes, 3)
+
+		// Find the exit node
+		exitNode = nil
+		for _, node := range nodes {
+			if node.GetName() == exitStatus.Self.HostName {
+				exitNode = node
+				break
+			}
+		}
+		assert.NotNil(c, exitNode, "exit node not found")
+		if exitNode != nil {
+			// Exit node should have 2 available routes (0.0.0.0/0 and ::/0)
+			assert.Len(c, exitNode.GetAvailableRoutes(), 2, "exit node should advertise 2 routes")
+		}
+	}, 10*time.Second, 500*time.Millisecond, "waiting for exit node advertisement")
+
+	// Approve the exit routes
+	require.NotNil(t, exitNode, "exit node not found after advertisement")
+
+	_, err = headscale.ApproveRoutes(exitNode.GetId(), []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()})
+	require.NoError(t, err, "failed to approve exit routes")
+
+	// Wait for routes to be approved in the database
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		nodes, err = headscale.ListNodes()
+		assert.NoError(c, err)
+
+		for _, node := range nodes {
+			if node.GetName() == exitStatus.Self.HostName {
+				assert.Len(c, node.GetApprovedRoutes(), 2, "exit node should have 2 approved routes")
+				assert.Len(c, node.GetSubnetRoutes(), 2, "exit node should have 2 subnet routes")
+			}
+		}
+	}, 10*time.Second, 500*time.Millisecond, "waiting for route approval")
+
+	// The key test: the mobile client should NOT see the exit node in its peer list
+	// because it does not have autogroup:internet in the ACL.
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		status, err := mobileClient.Status()
+		assert.NoError(c, err)
+
+		// Mobile should see server as a peer (allowed by ACL)
+		serverStatus := serverClient.MustStatus()
+		_, hasPeer := status.Peer[serverStatus.Self.PublicKey]
+		assert.True(c, hasPeer, "mobile should see server as peer")
+
+		// Mobile should NOT see the exit node in its peer list at all, since no ACL allows access
+		_, hasExitPeer := status.Peer[exitStatus.Self.PublicKey]
+		assert.False(c, hasExitPeer, "mobile should NOT see exit node as peer without autogroup:internet in ACL")
+	}, 10*time.Second, 500*time.Millisecond, "verifying mobile cannot see exit node")
+
+	// Server should also not see the exit node (no ACL rule allowing it)
+	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+		status, err := serverClient.Status()
+		assert.NoError(c, err)
+
+		_, hasExitPeer := status.Peer[exitStatus.Self.PublicKey]
+		assert.False(c, hasExitPeer, "server should NOT see exit node as peer without autogroup:internet in ACL")
+	}, 10*time.Second, 500*time.Millisecond, "verifying server cannot see exit node")
+}
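For contrast with the policy embedded in the test: granting mobile the autogroup:internet destination is what, under this change, would make the exit node visible again. A hypothetical policy for that positive case (sketch only, not part of this diff) adds one ACL entry:

{
	"hosts": {
		"mobile": "100.64.0.1/32",
		"server": "100.64.0.2/32",
		"exit": "100.64.0.3/32"
	},
	"acls": [
		{ "action": "accept", "src": ["mobile"], "dst": ["server:80"] },
		{ "action": "accept", "src": ["mobile"], "dst": ["autogroup:internet:*"] }
	]
}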