Mirror of https://github.com/juanfont/headscale.git
Synced 2025-12-15 17:51:48 +00:00

Compare commits (3 commits): copilot/in ... copilot/fi
| Author | SHA1 | Date |
|---|---|---|
| | 18fb3f00a8 | |
| | 1980ee6ca4 | |
| | 6bb7f4383e | |
.github/workflows/test-integration.yaml (vendored): 1 change
@@ -70,7 +70,6 @@ jobs:
- TestTaildrop
- TestUpdateHostnameFromClient
- TestExpireNode
- TestSetNodeExpiryInFuture
- TestNodeOnlineStatus
- TestPingAllByIPManyUpDown
- Test2118DeletingOnlineNodePanics
@@ -2,11 +2,6 @@

## Next

### Changes

- Expire nodes with a custom timestamp [#2828](https://github.com/juanfont/headscale/pull/2828)

## 0.27.0 (2025-10-27)

**Minimum supported Tailscale client version: v1.64.0**
@@ -15,7 +15,6 @@ import (
"github.com/samber/lo"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"tailscale.com/types/key"
)

@@ -52,7 +51,6 @@ func init() {
nodeCmd.AddCommand(registerNodeCmd)

expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
expireNodeCmd.Flags().StringP("expiry", "e", "", "Set expire to (RFC3339 format, e.g. 2025-08-27T10:00:00Z), or leave empty to expire immediately.")
err = expireNodeCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
@@ -291,37 +289,12 @@ var expireNodeCmd = &cobra.Command{
)
}

expiry, err := cmd.Flags().GetString("expiry")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)

return
}
expiryTime := time.Now()
if expiry != "" {
expiryTime, err = time.Parse(time.RFC3339, expiry)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting expiry to string: %s", err),
output,
)

return
}
}

ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()

request := &v1.ExpireNodeRequest{
NodeId: identifier,
Expiry: timestamppb.New(expiryTime),
}

response, err := client.ExpireNode(ctx, request)
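The hunk above reads an optional `--expiry` flag, parses it as RFC3339, and otherwise falls back to expiring the node immediately. A minimal, self-contained sketch of that parsing pattern (the `parseExpiry` helper is illustrative, not part of headscale):

```go
package main

import (
	"fmt"
	"time"
)

// parseExpiry mirrors the flag handling in the hunk above: an empty value
// means "expire immediately", anything else must be a valid RFC3339 time.
func parseExpiry(flag string) (time.Time, error) {
	if flag == "" {
		return time.Now(), nil
	}
	return time.Parse(time.RFC3339, flag)
}

func main() {
	expiry, err := parseExpiry("2025-08-27T10:00:00Z")
	if err != nil {
		fmt.Println("invalid expiry:", err)
		return
	}
	fmt.Println("node expires at", expiry)
}
```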
@@ -19,7 +19,7 @@
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
buildGo = pkgs.buildGo125Module;
vendorHash = "sha256-VOi4PGZ8I+2MiwtzxpKc/4smsL5KcH/pHVkjJfAFPJ0=";
vendorHash = "sha256-GUIzlPRsyEq1uSTzRNds9p1uVu4pTeH5PAxrJ5Njhis=";
in {
headscale = buildGo {
pname = "headscale";
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/apikey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/device.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/headscale.proto

@@ -471,8 +471,6 @@ func local_request_HeadscaleService_DeleteNode_0(ctx context.Context, marshaler
return msg, metadata, err
}

var filter_HeadscaleService_ExpireNode_0 = &utilities.DoubleArray{Encoding: map[string]int{"node_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}}

func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ExpireNodeRequest
@@ -487,12 +485,6 @@ func request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler runtim
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ExpireNode(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
@@ -511,12 +503,6 @@ func local_request_HeadscaleService_ExpireNode_0(ctx context.Context, marshaler
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_HeadscaleService_ExpireNode_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ExpireNode(ctx, &protoReq)
return msg, metadata, err
}
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/node.proto

@@ -729,7 +729,6 @@ func (*DeleteNodeResponse) Descriptor() ([]byte, []int) {
type ExpireNodeRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Expiry *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiry,proto3" json:"expiry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
@@ -771,13 +770,6 @@ func (x *ExpireNodeRequest) GetNodeId() uint64 {
return 0
}

func (x *ExpireNodeRequest) GetExpiry() *timestamppb.Timestamp {
if x != nil {
return x.Expiry
}
return nil
}

type ExpireNodeResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Node *Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
@@ -1357,10 +1349,9 @@ const file_headscale_v1_node_proto_rawDesc = "" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\",\n" +
"\x11DeleteNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"\x14\n" +
"\x12DeleteNodeResponse\"`\n" +
"\x12DeleteNodeResponse\",\n" +
"\x11ExpireNodeRequest\x12\x17\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\x122\n" +
"\x06expiry\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x06expiry\"<\n" +
"\anode_id\x18\x01 \x01(\x04R\x06nodeId\"<\n" +
"\x12ExpireNodeResponse\x12&\n" +
"\x04node\x18\x01 \x01(\v2\x12.headscale.v1.NodeR\x04node\"G\n" +
"\x11RenameNodeRequest\x12\x17\n" +
@@ -1448,17 +1439,16 @@ var file_headscale_v1_node_proto_depIdxs = []int32{
1, // 7: headscale.v1.GetNodeResponse.node:type_name -> headscale.v1.Node
1, // 8: headscale.v1.SetTagsResponse.node:type_name -> headscale.v1.Node
1, // 9: headscale.v1.SetApprovedRoutesResponse.node:type_name -> headscale.v1.Node
25, // 10: headscale.v1.ExpireNodeRequest.expiry:type_name -> google.protobuf.Timestamp
1, // 11: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 13: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 14: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 15: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
1, // 10: headscale.v1.ExpireNodeResponse.node:type_name -> headscale.v1.Node
1, // 11: headscale.v1.RenameNodeResponse.node:type_name -> headscale.v1.Node
1, // 12: headscale.v1.ListNodesResponse.nodes:type_name -> headscale.v1.Node
1, // 13: headscale.v1.MoveNodeResponse.node:type_name -> headscale.v1.Node
1, // 14: headscale.v1.DebugCreateNodeResponse.node:type_name -> headscale.v1.Node
15, // [15:15] is the sub-list for method output_type
15, // [15:15] is the sub-list for method input_type
15, // [15:15] is the sub-list for extension type_name
15, // [15:15] is the sub-list for extension extendee
0, // [0:15] is the sub-list for field type_name
}

func init() { file_headscale_v1_node_proto_init() }
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/policy.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/preauthkey.proto

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.10
// protoc-gen-go v1.36.8
// protoc (unknown)
// source: headscale/v1/user.proto
@@ -406,13 +406,6 @@
"required": true,
"type": "string",
"format": "uint64"
},
{
"name": "expiry",
"in": "query",
"required": false,
"type": "string",
"format": "date-time"
}
],
"tags": [
go.mod: 6 changes

@@ -36,7 +36,7 @@ require (
github.com/spf13/viper v1.21.0
github.com/stretchr/testify v1.11.1
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
@@ -115,7 +115,7 @@ require (
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/creachadair/mds v0.25.10 // indirect
github.com/creachadair/mds v0.25.2 // indirect
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
github.com/distribution/reference v0.6.0 // indirect
@@ -159,7 +159,7 @@ require (
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
go.sum: 12 changes

@@ -124,8 +124,8 @@ github.com/creachadair/command v0.2.0 h1:qTA9cMMhZePAxFoNdnk6F6nn94s1qPndIg9hJbq
github.com/creachadair/command v0.2.0/go.mod h1:j+Ar+uYnFsHpkMeV9kGj6lJ45y9u2xqtg8FYy6cm+0o=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
github.com/creachadair/mds v0.25.10 h1:9k9JB35D1xhOCFl0liBhagBBp8fWWkKZrA7UXsfoHtA=
github.com/creachadair/mds v0.25.10/go.mod h1:4hatI3hRM+qhzuAmqPRFvaBM8mONkS7nsLxkcuTYUIs=
github.com/creachadair/mds v0.25.2 h1:xc0S0AfDq5GX9KUR5sLvi5XjA61/P6S5e0xFs1vA18Q=
github.com/creachadair/mds v0.25.2/go.mod h1:+s4CFteFRj4eq2KcGHW8Wei3u9NyzSPzNV32EvjyK/Q=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -276,8 +276,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jsimonetti/rtnetlink v1.4.1 h1:JfD4jthWBqZMEffc5RjgmlzpYttAVw1sdnmiNaPO3hE=
github.com/jsimonetti/rtnetlink v1.4.1/go.mod h1:xJjT7t59UIZ62GLZbv6PLLo8VFrostJMPBAheR6OM8w=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
@@ -459,8 +459,8 @@ github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+y
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993 h1:FyiiAvDAxpB0DrW2GW3KOVfi3YFOtsQUEeFWbf55JJU=
github.com/tailscale/squibble v0.0.0-20251030164342-4d5df9caa993/go.mod h1:xJkMmR3t+thnUQhA3Q4m2VSlS5pcOq+CIjmU/xfKKx4=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
@@ -932,26 +932,6 @@ AND auth_key_id NOT IN (
},
Rollback: func(db *gorm.DB) error { return nil },
},
{
// Drop all tables that are no longer in use and has existed.
// They potentially still present from broken migrations in the past.
ID: "202510311551",
Migrate: func(tx *gorm.DB) error {
for _, oldTable := range []string{"namespaces", "machines", "shared_machines", "kvs", "pre_auth_key_acl_tags", "routes"} {
err := tx.Migrator().DropTable(oldTable)
if err != nil {
log.Trace().Str("table", oldTable).
Err(err).
Msg("Error dropping old table, continuing...")
}
}

return nil
},
Rollback: func(tx *gorm.DB) error {
return nil
},
},
// From this point, the following rules must be followed:
// - NEVER use gorm.AutoMigrate, write the exact migration steps needed
// - AutoMigrate depends on the struct staying exactly the same, which it won't over time.
@@ -982,17 +962,7 @@ AND auth_key_id NOT IN (
ctx, cancel := context.WithTimeout(context.Background(), contextTimeoutSecs*time.Second)
defer cancel()

opts := squibble.DigestOptions{
IgnoreTables: []string{
// Litestream tables, these are inserted by
// litestream and not part of our schema
// https://litestream.io/how-it-works
"_litestream_lock",
"_litestream_seq",
},
}

if err := squibble.Validate(ctx, sqlConn, dbSchema, &opts); err != nil {
if err := squibble.Validate(ctx, sqlConn, dbSchema); err != nil {
return nil, fmt.Errorf("validating schema: %w", err)
}
}
@@ -27,7 +27,9 @@ const (
NodeGivenNameTrimSize = 2
)

var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
var (
invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")
)

var (
ErrNodeNotFound = errors.New("node not found")
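The hunk above only changes how `invalidDNSRegex` is declared; the pattern itself is unchanged. As a self-contained illustration of what such a pattern is typically used for (stripping characters that are not DNS-safe from a lower-cased name; the sample input is made up):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as invalidDNSRegex in the hunk above: anything outside
// lower-case letters, digits, "-" and "." is treated as invalid.
var invalidDNSRegex = regexp.MustCompile("[^a-z0-9-.]+")

func main() {
	// Hypothetical hostname, lower-cased and stripped of invalid characters.
	raw := "My_Laptop (office)"
	clean := invalidDNSRegex.ReplaceAllString(strings.ToLower(raw), "")
	fmt.Println(clean) // mylaptopoffice
}
```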
@@ -1,40 +0,0 @@
|
||||
PRAGMA foreign_keys=OFF;
|
||||
BEGIN TRANSACTION;
|
||||
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
|
||||
INSERT INTO migrations VALUES('202312101416');
|
||||
INSERT INTO migrations VALUES('202312101430');
|
||||
INSERT INTO migrations VALUES('202402151347');
|
||||
INSERT INTO migrations VALUES('2024041121742');
|
||||
INSERT INTO migrations VALUES('202406021630');
|
||||
INSERT INTO migrations VALUES('202409271400');
|
||||
INSERT INTO migrations VALUES('202407191627');
|
||||
INSERT INTO migrations VALUES('202408181235');
|
||||
INSERT INTO migrations VALUES('202501221827');
|
||||
INSERT INTO migrations VALUES('202501311657');
|
||||
INSERT INTO migrations VALUES('202502070949');
|
||||
INSERT INTO migrations VALUES('202502131714');
|
||||
INSERT INTO migrations VALUES('202502171819');
|
||||
INSERT INTO migrations VALUES('202505091439');
|
||||
INSERT INTO migrations VALUES('202505141324');
|
||||
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
|
||||
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
|
||||
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
|
||||
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
|
||||
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
|
||||
DELETE FROM sqlite_sequence;
|
||||
INSERT INTO sqlite_sequence VALUES('nodes',0);
|
||||
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
|
||||
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
|
||||
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
|
||||
-- Create all the old tables we have had and ensure they are clean up.
|
||||
CREATE TABLE `namespaces` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `kvs` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `shared_machines` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `pre_auth_key_acl_tags` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `routes` (`id` text,PRIMARY KEY (`id`));
|
||||
COMMIT;
|
||||
@@ -1,14 +0,0 @@
|
||||
CREATE TABLE `migrations` (`id` text,PRIMARY KEY (`id`));
|
||||
CREATE TABLE `users` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`name` text,`display_name` text,`email` text,`provider_identifier` text,`provider` text,`profile_pic_url` text);
|
||||
CREATE INDEX `idx_users_deleted_at` ON `users`(`deleted_at`);
|
||||
CREATE TABLE `pre_auth_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`key` text,`user_id` integer,`reusable` numeric,`ephemeral` numeric DEFAULT false,`used` numeric DEFAULT false,`tags` text,`created_at` datetime,`expiration` datetime,CONSTRAINT `fk_pre_auth_keys_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE SET NULL);
|
||||
CREATE TABLE `api_keys` (`id` integer PRIMARY KEY AUTOINCREMENT,`prefix` text,`hash` blob,`created_at` datetime,`expiration` datetime,`last_seen` datetime);
|
||||
CREATE UNIQUE INDEX `idx_api_keys_prefix` ON `api_keys`(`prefix`);
|
||||
CREATE TABLE IF NOT EXISTS "nodes" (`id` integer PRIMARY KEY AUTOINCREMENT,`machine_key` text,`node_key` text,`disco_key` text,`endpoints` text,`host_info` text,`ipv4` text,`ipv6` text,`hostname` text,`given_name` varchar(63),`user_id` integer,`register_method` text,`forced_tags` text,`auth_key_id` integer,`expiry` datetime,`last_seen` datetime,`approved_routes` text,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,CONSTRAINT `fk_nodes_user` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`) ON DELETE CASCADE,CONSTRAINT `fk_nodes_auth_key` FOREIGN KEY (`auth_key_id`) REFERENCES `pre_auth_keys`(`id`));
|
||||
CREATE TABLE `policies` (`id` integer PRIMARY KEY AUTOINCREMENT,`created_at` datetime,`updated_at` datetime,`deleted_at` datetime,`data` text);
|
||||
CREATE INDEX `idx_policies_deleted_at` ON `policies`(`deleted_at`);
|
||||
CREATE UNIQUE INDEX idx_provider_identifier ON users (provider_identifier) WHERE provider_identifier IS NOT NULL;
|
||||
CREATE UNIQUE INDEX idx_name_provider_identifier ON users (name,provider_identifier);
|
||||
CREATE UNIQUE INDEX idx_name_no_provider_identifier ON users (name) WHERE provider_identifier IS NULL;
|
||||
CREATE TABLE _litestream_seq (id INTEGER PRIMARY KEY, seq INTEGER);
|
||||
CREATE TABLE _litestream_lock (id INTEGER);
|
||||
@@ -12,6 +12,7 @@ import (
"net/url"
"os"
"reflect"
"slices"
"sync"
"time"

@@ -126,7 +127,17 @@ func shuffleDERPMap(dm *tailcfg.DERPMap) {
return
}

for id, region := range dm.Regions {
// Collect region IDs and sort them to ensure deterministic iteration order.
// Map iteration order is non-deterministic in Go, which would cause the
// shuffle to be non-deterministic even with a fixed seed.
ids := make([]int, 0, len(dm.Regions))
for id := range dm.Regions {
ids = append(ids, id)
}
slices.Sort(ids)

for _, id := range ids {
region := dm.Regions[id]
if len(region.Nodes) == 0 {
continue
}
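The shuffle hunk above replaces direct map iteration with iteration over sorted keys. A standalone sketch of that sorted-keys idiom (the sample map values are placeholders):

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Placeholder region map; values are arbitrary.
	regions := map[int]string{10: "sea", 2: "sfo", 4: "fra"}

	// Go randomizes map iteration order, so collect the keys and sort them
	// to get a stable, deterministic order, as the hunk above does.
	ids := make([]int, 0, len(regions))
	for id := range regions {
		ids = append(ids, id)
	}
	slices.Sort(ids)

	for _, id := range ids {
		fmt.Println(id, regions[id]) // 2 sfo, 4 fra, 10 sea
	}
}
```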
@@ -83,9 +83,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
RegionCode: "sea",
RegionName: "Seattle",
Nodes: []*tailcfg.DERPNode{
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
{Name: "10d", RegionID: 10, HostName: "derp10d.tailscale.com"},
{Name: "10c", RegionID: 10, HostName: "derp10c.tailscale.com"},
{Name: "10b", RegionID: 10, HostName: "derp10b.tailscale.com"},
},
},
2: {
@@ -93,9 +93,9 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
RegionCode: "sfo",
RegionName: "San Francisco",
Nodes: []*tailcfg.DERPNode{
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
{Name: "2d", RegionID: 2, HostName: "derp2d.tailscale.com"},
{Name: "2e", RegionID: 2, HostName: "derp2e.tailscale.com"},
{Name: "2f", RegionID: 2, HostName: "derp2f.tailscale.com"},
},
},
},
@@ -169,6 +169,74 @@ func TestShuffleDERPMapDeterministic(t *testing.T) {
},
},
},
{
name: "same dataset with another base domain",
baseDomain: "another.example.com",
derpMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
},
},
},
},
expected: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
},
},
},
},
},
{
name: "same dataset with yet another base domain",
baseDomain: "yetanother.example.com",
derpMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
},
},
},
},
expected: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
4: {
RegionID: 4,
RegionCode: "fra",
RegionName: "Frankfurt",
Nodes: []*tailcfg.DERPNode{
{Name: "4i", RegionID: 4, HostName: "derp4i.tailscale.com"},
{Name: "4h", RegionID: 4, HostName: "derp4h.tailscale.com"},
{Name: "4f", RegionID: 4, HostName: "derp4f.tailscale.com"},
{Name: "4g", RegionID: 4, HostName: "derp4g.tailscale.com"},
},
},
},
},
},
}

for _, tt := range tests {
@@ -416,12 +416,9 @@ func (api headscaleV1APIServer) ExpireNode(
ctx context.Context,
request *v1.ExpireNodeRequest,
) (*v1.ExpireNodeResponse, error) {
expiry := time.Now()
if request.GetExpiry() != nil {
expiry = request.GetExpiry().AsTime()
}
now := time.Now()

node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), expiry)
node, nodeChange, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), now)
if err != nil {
return nil, err
}
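The gRPC hunk above switches between honouring an optional `expiry` timestamp and always using the current time. A minimal sketch of the optional-Timestamp pattern using the protobuf well-known type (the `resolveExpiry` helper name is illustrative, not part of headscale):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// resolveExpiry mirrors the pattern in the hunk above: use the supplied
// protobuf timestamp when present, otherwise expire immediately.
func resolveExpiry(ts *timestamppb.Timestamp) time.Time {
	if ts != nil {
		return ts.AsTime()
	}
	return time.Now()
}

func main() {
	fmt.Println(resolveExpiry(nil))
	fmt.Println(resolveExpiry(timestamppb.New(time.Now().Add(2 * time.Hour))))
}
```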
@@ -7,55 +7,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/policy/matcher"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/views"
|
||||
"tailscale.com/util/multierr"
|
||||
)
|
||||
|
||||
// canUseExitRoutes checks if a node can access exit routes (0.0.0.0/0 and ::/0)
|
||||
// based on ACL matchers. This specifically checks if the node has permission to
|
||||
// access the internet broadly, which is required to use exit nodes.
|
||||
//
|
||||
// Exit routes should only be visible when the ACL explicitly grants broad internet
|
||||
// access (e.g., via autogroup:internet), not just access to specific services.
|
||||
//
|
||||
// The function tests if the ACL grants access to well-known public DNS servers.
|
||||
// If any of these are accessible, it indicates the ACL grants broad internet access
|
||||
// (as opposed to just specific private services), which is sufficient for exit node usage.
|
||||
func canUseExitRoutes(node types.NodeView, matchers []matcher.Match) bool {
|
||||
src := node.IPs()
|
||||
|
||||
// Sample public internet IPs to test for broad internet access.
|
||||
// If the ACL grants access to any of these well-known public IPs, it indicates
|
||||
// broad internet access (e.g., via autogroup:internet) rather than just access
|
||||
// to specific private services.
|
||||
samplePublicIPs := []netip.Addr{
|
||||
netip.MustParseAddr("1.1.1.1"), // Cloudflare DNS
|
||||
netip.MustParseAddr("8.8.8.8"), // Google DNS
|
||||
netip.MustParseAddr("208.67.222.222"), // OpenDNS
|
||||
}
|
||||
|
||||
// Check if any matcher grants access to sample public IPs
|
||||
for _, matcher := range matchers {
|
||||
// Check if this node is in the source
|
||||
if !matcher.SrcsContainsIPs(src...) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the destination includes any public internet IPs.
|
||||
// DestsContainsIP returns true if ANY of the provided IPs is in the destination set.
|
||||
// This will be true for autogroup:internet (which resolves to the public internet)
|
||||
// but false for rules that only allow access to specific private IPs or services.
|
||||
if matcher.DestsContainsIP(samplePublicIPs...) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// MapResponseBuilder provides a fluent interface for building tailcfg.MapResponse.
|
||||
type MapResponseBuilder struct {
|
||||
resp *tailcfg.MapResponse
|
||||
@@ -124,14 +81,6 @@ func (b *MapResponseBuilder) WithSelfNode() *MapResponseBuilder {
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
return policy.ReduceRoutes(nv, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
},
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
// For self node, always include its own exit routes
|
||||
peerNode, ok := b.mapper.state.GetNodeByID(id)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return peerNode.ExitRoutes()
|
||||
},
|
||||
b.mapper.cfg)
|
||||
if err != nil {
|
||||
b.addError(err)
|
||||
@@ -307,22 +256,6 @@ func (b *MapResponseBuilder) buildTailPeers(peers views.Slice[types.NodeView]) (
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
return policy.ReduceRoutes(node, b.mapper.state.GetNodePrimaryRoutes(id), matchers)
|
||||
},
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
// For peer nodes, only include exit routes if the requesting node can use exit nodes
|
||||
peerNode, ok := b.mapper.state.GetNodeByID(id)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
exitRoutes := peerNode.ExitRoutes()
|
||||
if len(exitRoutes) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Check if the requesting node has permission to use exit nodes
|
||||
if canUseExitRoutes(node, matchers) {
|
||||
return exitRoutes
|
||||
}
|
||||
return nil
|
||||
},
|
||||
b.mapper.cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -1,336 +0,0 @@
|
||||
package mapper
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/net/tsaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
// TestExitNodeVisibilityWithoutAutogroupInternet tests that exit nodes are not visible
|
||||
// to nodes that don't have autogroup:internet permission in their ACL.
|
||||
// This is a regression test for https://github.com/juanfont/headscale/issues/2788
|
||||
func TestExitNodeVisibilityWithoutAutogroupInternet(t *testing.T) {
|
||||
mustNK := func(str string) key.NodePublic {
|
||||
var k key.NodePublic
|
||||
_ = k.UnmarshalText([]byte(str))
|
||||
return k
|
||||
}
|
||||
|
||||
mustDK := func(str string) key.DiscoPublic {
|
||||
var k key.DiscoPublic
|
||||
_ = k.UnmarshalText([]byte(str))
|
||||
return k
|
||||
}
|
||||
|
||||
mustMK := func(str string) key.MachinePublic {
|
||||
var k key.MachinePublic
|
||||
_ = k.UnmarshalText([]byte(str))
|
||||
return k
|
||||
}
|
||||
|
||||
// Create three nodes: mobile, server, exit
|
||||
mobile := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: mustMK(
|
||||
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
IPv4: iap("100.64.0.1"),
|
||||
Hostname: "mobile",
|
||||
GivenName: "mobile",
|
||||
UserID: 1,
|
||||
User: types.User{
|
||||
Name: "alice",
|
||||
},
|
||||
}
|
||||
|
||||
server := &types.Node{
|
||||
ID: 2,
|
||||
MachineKey: mustMK(
|
||||
"mkey:e08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422508",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:8b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306ff",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:df7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03085",
|
||||
),
|
||||
IPv4: iap("100.64.0.2"),
|
||||
Hostname: "server",
|
||||
GivenName: "server",
|
||||
UserID: 1,
|
||||
User: types.User{
|
||||
Name: "alice",
|
||||
},
|
||||
}
|
||||
|
||||
exitNode := &types.Node{
|
||||
ID: 3,
|
||||
MachineKey: mustMK(
|
||||
"mkey:d08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422509",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:7b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fd",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:ef7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03086",
|
||||
),
|
||||
IPv4: iap("100.64.0.3"),
|
||||
Hostname: "exit",
|
||||
GivenName: "exit",
|
||||
UserID: 1,
|
||||
User: types.User{
|
||||
Name: "alice",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
},
|
||||
},
|
||||
// Exit node has approved exit routes
|
||||
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()},
|
||||
}
|
||||
|
||||
// ACL that only allows mobile -> server:80, no autogroup:internet
|
||||
pol := []byte(`{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"server": "100.64.0.2/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["mobile"],
|
||||
"dst": ["server:80"]
|
||||
}
|
||||
]
|
||||
}`)
|
||||
|
||||
polMan, err := policy.NewPolicyManager(pol, []types.User{mobile.User}, types.Nodes{mobile, server, exitNode}.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
matchers, err := polMan.MatchersForNode(mobile.View())
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &types.Config{
|
||||
BaseDomain: "",
|
||||
RandomizeClientPort: false,
|
||||
}
|
||||
|
||||
// Build the exit node as a peer from mobile's perspective
|
||||
exitTailNode, err := tailNode(
|
||||
exitNode.View(),
|
||||
0,
|
||||
polMan,
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
// No primary routes for this test
|
||||
return nil
|
||||
},
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
// For peer nodes, only include exit routes if the requesting node can use exit nodes
|
||||
peerNode := exitNode
|
||||
if id != peerNode.ID {
|
||||
return nil
|
||||
}
|
||||
exitRoutes := peerNode.ExitRoutes()
|
||||
if len(exitRoutes) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Check if the requesting node has permission to use exit nodes
|
||||
if canUseExitRoutes(mobile.View(), matchers) {
|
||||
return exitRoutes
|
||||
}
|
||||
return nil
|
||||
},
|
||||
cfg,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that exit routes are NOT included in AllowedIPs
|
||||
// since mobile doesn't have autogroup:internet permission
|
||||
hasExitRoutes := false
|
||||
for _, prefix := range exitTailNode.AllowedIPs {
|
||||
if tsaddr.IsExitRoute(prefix) {
|
||||
hasExitRoutes = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasExitRoutes {
|
||||
t.Errorf("Exit node should NOT have exit routes in AllowedIPs when requesting node lacks autogroup:internet permission.\nAllowedIPs: %v", exitTailNode.AllowedIPs)
|
||||
}
|
||||
|
||||
// The AllowedIPs should only contain the exit node's own IP, not the exit routes
|
||||
// Check the count and that no exit routes are present
|
||||
if len(exitTailNode.AllowedIPs) != 1 {
|
||||
t.Errorf("Expected exactly 1 IP in AllowedIPs (node's own IP), got %d: %v", len(exitTailNode.AllowedIPs), exitTailNode.AllowedIPs)
|
||||
}
|
||||
|
||||
// Verify the one IP is the node's own IP
|
||||
expectedIP := netip.MustParsePrefix("100.64.0.3/32")
|
||||
found := false
|
||||
for _, ip := range exitTailNode.AllowedIPs {
|
||||
if ip == expectedIP {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("Expected to find node's own IP %s in AllowedIPs, got: %v", expectedIP, exitTailNode.AllowedIPs)
|
||||
}
|
||||
}
|
||||
|
||||
// TestExitNodeVisibilityWithAutogroupInternet tests that exit nodes ARE visible
|
||||
// to nodes that have autogroup:internet permission in their ACL.
|
||||
func TestExitNodeVisibilityWithAutogroupInternet(t *testing.T) {
|
||||
mustNK := func(str string) key.NodePublic {
|
||||
var k key.NodePublic
|
||||
_ = k.UnmarshalText([]byte(str))
|
||||
return k
|
||||
}
|
||||
|
||||
mustDK := func(str string) key.DiscoPublic {
|
||||
var k key.DiscoPublic
|
||||
_ = k.UnmarshalText([]byte(str))
|
||||
return k
|
||||
}
|
||||
|
||||
mustMK := func(str string) key.MachinePublic {
|
||||
var k key.MachinePublic
|
||||
_ = k.UnmarshalText([]byte(str))
|
||||
return k
|
||||
}
|
||||
|
||||
mobile := &types.Node{
|
||||
ID: 1,
|
||||
MachineKey: mustMK(
|
||||
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
|
||||
),
|
||||
IPv4: iap("100.64.0.1"),
|
||||
Hostname: "mobile",
|
||||
GivenName: "mobile",
|
||||
UserID: 1,
|
||||
User: types.User{
|
||||
Name: "alice",
|
||||
},
|
||||
}
|
||||
|
||||
exitNode := &types.Node{
|
||||
ID: 3,
|
||||
MachineKey: mustMK(
|
||||
"mkey:d08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422509",
|
||||
),
|
||||
NodeKey: mustNK(
|
||||
"nodekey:7b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fd",
|
||||
),
|
||||
DiscoKey: mustDK(
|
||||
"discokey:ef7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03086",
|
||||
),
|
||||
IPv4: iap("100.64.0.3"),
|
||||
Hostname: "exit",
|
||||
GivenName: "exit",
|
||||
UserID: 1,
|
||||
User: types.User{
|
||||
Name: "alice",
|
||||
},
|
||||
Hostinfo: &tailcfg.Hostinfo{
|
||||
RoutableIPs: []netip.Prefix{
|
||||
tsaddr.AllIPv4(),
|
||||
tsaddr.AllIPv6(),
|
||||
},
|
||||
},
|
||||
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()},
|
||||
}
|
||||
|
||||
// ACL that allows mobile to use autogroup:internet
|
||||
pol := []byte(`{
|
||||
"hosts": {
|
||||
"mobile": "100.64.0.1/32",
|
||||
"exit": "100.64.0.3/32"
|
||||
},
|
||||
"acls": [
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["mobile"],
|
||||
"dst": ["autogroup:internet:*"]
|
||||
}
|
||||
]
|
||||
}`)
|
||||
|
||||
polMan, err := policy.NewPolicyManager(pol, []types.User{mobile.User}, types.Nodes{mobile, exitNode}.ViewSlice())
|
||||
require.NoError(t, err)
|
||||
|
||||
matchers, err := polMan.MatchersForNode(mobile.View())
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := &types.Config{
|
||||
BaseDomain: "",
|
||||
RandomizeClientPort: false,
|
||||
}
|
||||
|
||||
// Build the exit node as a peer from mobile's perspective
|
||||
exitTailNode, err := tailNode(
|
||||
exitNode.View(),
|
||||
0,
|
||||
polMan,
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
return nil
|
||||
},
|
||||
func(id types.NodeID) []netip.Prefix {
|
||||
peerNode := exitNode
|
||||
if id != peerNode.ID {
|
||||
return nil
|
||||
}
|
||||
exitRoutes := peerNode.ExitRoutes()
|
||||
if len(exitRoutes) == 0 {
|
||||
return nil
|
||||
}
|
||||
// Check if the requesting node has permission to use exit nodes - mobile has autogroup:internet permission
|
||||
if canUseExitRoutes(mobile.View(), matchers) {
|
||||
return exitRoutes
|
||||
}
|
||||
return nil
|
||||
},
|
||||
cfg,
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that exit routes ARE included in AllowedIPs
|
||||
hasIPv4ExitRoute := false
|
||||
hasIPv6ExitRoute := false
|
||||
for _, prefix := range exitTailNode.AllowedIPs {
|
||||
if prefix == tsaddr.AllIPv4() {
|
||||
hasIPv4ExitRoute = true
|
||||
}
|
||||
if prefix == tsaddr.AllIPv6() {
|
||||
hasIPv6ExitRoute = true
|
||||
}
|
||||
}
|
||||
|
||||
if !hasIPv4ExitRoute {
|
||||
t.Errorf("Exit node should have IPv4 exit route (0.0.0.0/0) in AllowedIPs when requesting node has autogroup:internet permission.\nAllowedIPs: %v", exitTailNode.AllowedIPs)
|
||||
}
|
||||
|
||||
if !hasIPv6ExitRoute {
|
||||
t.Errorf("Exit node should have IPv6 exit route (::/0) in AllowedIPs when requesting node has autogroup:internet permission.\nAllowedIPs: %v", exitTailNode.AllowedIPs)
|
||||
}
|
||||
}
|
||||
@@ -21,7 +21,6 @@ func tailNodes(
capVer tailcfg.CapabilityVersion,
checker NodeCanHaveTagChecker,
primaryRouteFunc routeFilterFunc,
exitRouteFunc routeFilterFunc,
cfg *types.Config,
) ([]*tailcfg.Node, error) {
tNodes := make([]*tailcfg.Node, 0, nodes.Len())
@@ -32,7 +31,6 @@ func tailNodes(
capVer,
checker,
primaryRouteFunc,
exitRouteFunc,
cfg,
)
if err != nil {
@@ -51,7 +49,6 @@ func tailNode(
capVer tailcfg.CapabilityVersion,
checker NodeCanHaveTagChecker,
primaryRouteFunc routeFilterFunc,
exitRouteFunc routeFilterFunc,
cfg *types.Config,
) (*tailcfg.Node, error) {
addrs := node.Prefixes()
@@ -93,10 +90,7 @@ func tailNode(

routes := primaryRouteFunc(node.ID())
allowed := append(addrs, routes...)

// Only include exit routes if the exitRouteFunc allows them
exitRoutes := exitRouteFunc(node.ID())
allowed = append(allowed, exitRoutes...)
allowed = append(allowed, node.ExitRoutes()...)
tsaddr.SortPrefixes(allowed)

tNode := tailcfg.Node{
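The hunk above builds a node's AllowedIPs from its own prefixes plus primary routes and exit routes, then sorts the result. A rough, self-contained sketch of that composition (the concrete prefixes are placeholders, and it assumes the tailscale.com module is available for tsaddr):

```go
package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/net/tsaddr"
)

func main() {
	// Placeholder values standing in for a node's own address, its primary
	// subnet routes, and its exit routes.
	addrs := []netip.Prefix{netip.MustParsePrefix("100.64.0.3/32")}
	primary := []netip.Prefix{netip.MustParsePrefix("10.0.0.0/24")}
	exit := []netip.Prefix{tsaddr.AllIPv4(), tsaddr.AllIPv6()}

	// Concatenate and sort, as the hunk above does for AllowedIPs.
	allowed := append(append(addrs, primary...), exit...)
	tsaddr.SortPrefixes(allowed)
	fmt.Println(allowed)
}
```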
@@ -221,13 +221,6 @@ func TestTailNode(t *testing.T) {
func(id types.NodeID) []netip.Prefix {
return primary.PrimaryRoutes(id)
},
func(id types.NodeID) []netip.Prefix {
// For tests, include exit routes if node has them
if id == tt.node.ID {
return tt.node.ExitRoutes()
}
return nil
},
cfg,
)

@@ -288,9 +281,6 @@ func TestNodeExpiry(t *testing.T) {
func(id types.NodeID) []netip.Prefix {
return []netip.Prefix{}
},
func(id types.NodeID) []netip.Prefix {
return []netip.Prefix{}
},
&types.Config{},
)
if err != nil {
@@ -91,10 +91,3 @@ func (m *Match) SrcsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
func (m *Match) DestsOverlapsPrefixes(prefixes ...netip.Prefix) bool {
return slices.ContainsFunc(prefixes, m.dests.OverlapsPrefix)
}

// DestsContainsPrefixes checks if the destination IPSet contains any of the given prefixes.
// Returns true if at least one prefix is fully contained in the destination IPSet.
// This is more strict than DestsOverlapsPrefixes which only requires overlap.
func (m *Match) DestsContainsPrefixes(prefixes ...netip.Prefix) bool {
return slices.ContainsFunc(prefixes, m.dests.ContainsPrefix)
}
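Both matcher methods above are thin wrappers over `slices.ContainsFunc` from the standard library. A standalone sketch of the same any-of check over prefixes (the prefixes are placeholders, and `Overlaps` stands in for the IPSet predicates used in the real code):

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

func main() {
	dest := netip.MustParsePrefix("192.168.0.0/16")
	candidates := []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/8"),
		netip.MustParsePrefix("192.168.1.0/24"),
	}

	// True if at least one candidate prefix overlaps the destination prefix.
	ok := slices.ContainsFunc(candidates, dest.Overlaps)
	fmt.Println(ok) // true
}
```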
@@ -819,104 +819,6 @@ func TestExpireNode(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestSetNodeExpiryInFuture tests setting arbitrary expiration date
|
||||
// New expiration date should be stored in the db and propagated to all peers
|
||||
func TestSetNodeExpiryInFuture(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
Users: []string{"user1"},
|
||||
}
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
require.NoError(t, err)
|
||||
defer scenario.ShutdownAssertNoPanics(t)
|
||||
|
||||
err = scenario.CreateHeadscaleEnv([]tsic.Option{}, hsic.WithTestName("expirenodefuture"))
|
||||
requireNoErrHeadscaleEnv(t, err)
|
||||
|
||||
allClients, err := scenario.ListTailscaleClients()
|
||||
requireNoErrListClients(t, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
requireNoErrSync(t, err)
|
||||
|
||||
headscale, err := scenario.Headscale()
|
||||
require.NoError(t, err)
|
||||
|
||||
targetExpiry := time.Now().Add(2 * time.Hour).Round(time.Second).UTC()
|
||||
|
||||
result, err := headscale.Execute(
|
||||
[]string{
|
||||
"headscale", "nodes", "expire",
|
||||
"--identifier", "1",
|
||||
"--output", "json",
|
||||
"--expiry", targetExpiry.Format(time.RFC3339),
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = json.Unmarshal([]byte(result), &node)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.True(t, node.GetExpiry().AsTime().After(time.Now()))
|
||||
require.WithinDuration(t, targetExpiry, node.GetExpiry().AsTime(), 2*time.Second)
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
err = nodeKey.UnmarshalText([]byte(node.GetNodeKey()))
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, client := range allClients {
|
||||
if client.Hostname() == node.GetName() {
|
||||
continue
|
||||
}
|
||||
|
||||
assert.EventuallyWithT(
|
||||
t, func(ct *assert.CollectT) {
|
||||
status, err := client.Status()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
peerStatus, ok := status.Peer[nodeKey]
|
||||
assert.True(ct, ok, "node key should be present in peer list")
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
assert.NotNil(ct, peerStatus.KeyExpiry)
|
||||
assert.NotNil(ct, peerStatus.Expired)
|
||||
|
||||
if peerStatus.KeyExpiry != nil {
|
||||
assert.WithinDuration(
|
||||
ct,
|
||||
targetExpiry,
|
||||
*peerStatus.KeyExpiry,
|
||||
5*time.Second,
|
||||
"node %q should have key expiry near the requested future time",
|
||||
peerStatus.HostName,
|
||||
)
|
||||
|
||||
assert.Truef(
|
||||
ct,
|
||||
peerStatus.KeyExpiry.After(time.Now()),
|
||||
"node %q should have a key expiry timestamp in the future",
|
||||
peerStatus.HostName,
|
||||
)
|
||||
}
|
||||
|
||||
assert.Falsef(
|
||||
ct,
|
||||
peerStatus.Expired,
|
||||
"node %q should not be marked as expired",
|
||||
peerStatus.HostName,
|
||||
)
|
||||
}, 3*time.Minute, 5*time.Second, "Waiting for future expiry to propagate",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeOnlineStatus(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
|
||||
|
||||
@@ -82,10 +82,7 @@ message DeleteNodeRequest { uint64 node_id = 1; }

message DeleteNodeResponse {}

message ExpireNodeRequest {
uint64 node_id = 1;
google.protobuf.Timestamp expiry = 2;
}
message ExpireNodeRequest { uint64 node_id = 1; }

message ExpireNodeResponse { Node node = 1; }