Compare commits


283 Commits

Author SHA1 Message Date
Kristoffer Dalby
0bbf343348 Merge pull request #113 from kradalby/apple-mobileconfig
Apple macOS profile support
2021-09-26 21:34:11 +01:00
Kristoffer Dalby
9811809f6a Resolve conflict 2021-09-26 20:51:07 +01:00
Kristoffer Dalby
237a14858a Add apple endpoint to readme 2021-09-26 20:47:39 +01:00
Kristoffer Dalby
59c3d4bcfe Comment out iOS from /apple for now 2021-09-26 20:41:48 +01:00
Juan Font
7612cc84d2 Merge pull request #122 from juanfont/taildrop-support
Add support for Taildrop (file sharing)
2021-09-26 20:40:26 +02:00
Kristoffer Dalby
4aa91bc420 Merge branch 'main' into taildrop-support 2021-09-26 19:29:00 +01:00
Juan Font Alonso
c801a8c553 Improve comments on taildrop tests 2021-09-26 20:23:15 +02:00
Juan Font Alonso
5626f598ce Do several attempts to send the file 2021-09-26 18:59:23 +02:00
Juan Font Alonso
7d0da8b578 Added retries 2021-09-26 17:38:51 +02:00
Juan Font Alonso
eb87fc9215 Fixed getAPIURLs method 2021-09-26 15:17:27 +02:00
Juan Font Alonso
ada40960bd Removed unnecessary prints 2021-09-26 14:33:01 +02:00
Juan Font Alonso
83ead36fce Integration tests working for taildrop 2021-09-26 14:22:11 +02:00
Juan Font
05a5f21c3d Use curl to upload the file 2021-09-26 12:22:59 +02:00
Juan Font
a36328dbfc Added integration tests 2021-09-25 13:12:44 +02:00
Juan Font
cab5641d95 Update readme 2021-09-24 09:50:01 +02:00
Juan Font
b83894abd6 Add support for taildrop (#118) 2021-09-24 09:49:29 +02:00
Kristoffer Dalby
8e588ae146 Add a more comprehensive macOS explanation 2021-09-23 20:22:07 +01:00
Juan Font
83815f567d Merge pull request #109 from juanfont/tailscale-1.14
Update to Tailscale 1.14
2021-09-23 16:44:51 +02:00
Kristoffer Dalby
7db91c68be Merge pull request #121 from juanfont/main
New integration test for tailscale 1.14
2021-09-23 14:51:26 +01:00
Juan Font
109115c13b Merge pull request #120 from t56k/main
Fix namespace instructions in README.md
2021-09-23 09:55:57 +02:00
t56k
11e0402396 create a db file first 2021-09-23 17:14:04 +12:00
t56k
fd94105483 fix namespace instructions in README.md 2021-09-23 16:32:15 +12:00
Juan Font
96e8142540 Merge pull request #114 from kradalby/integration-tests-improvement
Improve integration tests
2021-09-22 14:51:47 +02:00
Kristoffer Dalby
9615138202 Remove non working default 2021-09-21 11:10:26 +01:00
Juan Font
9900b215cc Merge pull request #115 from ohdearaugustin/topic/fix-docu
Topic/fix docu
2021-09-21 09:19:09 +02:00
Kristoffer Dalby
d5ea224e11 Merge pull request #116 from ohdearaugustin/topic/docker-release-tag 2021-09-20 23:34:44 +01:00
Kristoffer Dalby
024d6ee7c3 Initial work on shared node integration test
This commit adds initial integration tests for shared nodes; it shares
them across namespaces and verifies that they are shared.

It does not yet manage to ping the shared node, because the connection
does not seem to be established.
2021-09-20 23:21:25 +01:00
ohdearaugustin
f653b00258 workflows/release: add docker full version tag 2021-09-21 00:10:00 +02:00
ohdearaugustin
ff1ee4ca64 Fix README whitespace 2021-09-21 00:02:54 +02:00
ohdearaugustin
830aa250e1 Fix README formating 2021-09-21 00:01:18 +02:00
ohdearaugustin
f0bbc3c7d8 Fix docker docu 2021-09-20 23:53:52 +02:00
Kristoffer Dalby
994b4eef17 Use JSON output and proper datamodel for tailscale status
This commit uses `tailscale status --json` to check status, and unmarshals
the output into the proper ipn Status type.
2021-09-20 22:53:34 +01:00
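
A minimal sketch of the approach this commit describes, assuming the `tailscale` binary is on PATH; `ipnstate.Status` is the status type from `tailscale.com/ipn/ipnstate`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"

	"tailscale.com/ipn/ipnstate"
)

func main() {
	// Ask the client for machine-readable status instead of scraping text.
	out, err := exec.Command("tailscale", "status", "--json").Output()
	if err != nil {
		panic(err)
	}

	// Unmarshal into the proper ipn status datamodel.
	var status ipnstate.Status
	if err := json.Unmarshal(out, &status); err != nil {
		panic(err)
	}
	fmt.Println("backend state:", status.BackendState)
}
```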
Kristoffer Dalby
f905812afa Test two namespaces
This expands the tests to verify two namespaces instead of only one.

It verifies some of the isolation, and is prework for shared nodes
testing
2021-09-20 20:18:28 +01:00
Kristoffer Dalby
d68d201722 Add version support to integration tests
This commit adds a list of tailscale versions to use in the integration
test. An equal distribution of versions will be used across the clients.
2021-09-20 19:23:18 +01:00
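
A rough sketch of the even distribution described above; the version list and client count here are illustrative, not the test's actual values:

```go
package main

import "fmt"

func main() {
	// Hypothetical version list; the real one lives in the integration test.
	versions := []string{"1.14.3", "1.12.3", "1.10.2"}

	// Client i simply gets versions[i % len(versions)], which spreads the
	// versions equally across all clients.
	for i := 0; i < 10; i++ {
		fmt.Printf("client %d -> tailscale %s\n", i, versions[i%len(versions)])
	}
}
```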
Juan Font
da209e89a7 Update README.md
Fixed typo (causing #110)
2021-09-20 18:42:04 +02:00
Juan Font
7940dbc78b Merge pull request #111 from woudsma/main
fix typo
2021-09-20 18:37:52 +02:00
Juan Font
4d22b4252f Merge pull request #108 from ohdearaugustin/topic/docker-image-version
Dockerfile: add golang tag
2021-09-20 18:37:22 +02:00
Kristoffer Dalby
b3efd1e47b Handle errors 2021-09-20 07:54:18 +01:00
Kristoffer Dalby
2d39d6602c Merge remote-tracking branch 'upstream/main' into apple-mobileconfig 2021-09-19 18:00:40 +01:00
Kristoffer Dalby
dfcab2b6d5 Wire up new handlers 2021-09-19 17:56:29 +01:00
Kristoffer Dalby
40c5263927 Add initial code for generating Apple profiles
This code adds new http handlers that will generate iOS and macOS
configuration profiles allowing us to override the Control server of the
official Tailscale.app.

Currently, macOS is working, as I have not found the correct "key" to
inject for iOS.

This means that a profile will allow users to no longer log in via the
command line, but they can use the app.
2021-09-19 17:54:41 +01:00
Kristoffer Dalby
bf26e37e0e Merge pull request #112 from fkr/main 2021-09-19 11:19:35 +01:00
Felix Kronlage-Dammers
e154e7a0fb fix typo, it is 'relayed' not 'relied' 2021-09-19 12:07:17 +02:00
Tjerk Woudsma
b28ebb5d20 fix typo 2021-09-18 12:34:04 +02:00
Juan Font
5840f88251 Update tailscale dependencies to v1.14 2021-09-14 23:46:16 +02:00
Juan Font
2c2968473a Update basic dependencies 2021-09-14 23:42:19 +02:00
Juan Font
8f1f48b7d0 Update README.md
Remove Google registry for the time being
2021-09-13 23:11:15 +02:00
Juan Font
536e8b71bf Removed wrong syntax in actions 2021-09-13 22:59:33 +02:00
Juan Font
acc43c39af Increased linter timeout in makefile 2021-09-13 22:58:35 +02:00
Juan Font
eae1b6a3de More timeout in linting 2021-09-13 22:51:58 +02:00
Juan Font
31cc61478f More timeout in linting 2021-09-13 22:47:38 +02:00
Juan Font
3095c1e150 Trying to correct Actions issues 2021-09-13 22:45:31 +02:00
Juan Font
e1d5da5bd9 Merge pull request #107 from qbit/no_color_trace
Remove trace lines about NO_COLOR.
2021-09-13 22:38:37 +02:00
Juan Font
5f818b7349 Merge pull request #89 from ohdearaugustin/topic/docker-release
Topic/docker release
2021-09-13 22:37:33 +02:00
ohdearaugustin
0aac79f8fa Dockerfile: add golang tag 2021-09-13 20:03:03 +02:00
ohdearaugustin
1e93347a26 Merge branch 'main' into topic/docker-release 2021-09-12 18:18:34 +02:00
ohdearaugustin
18867a4c84 update docu 2021-09-12 18:08:43 +02:00
ohdearaugustin
3b97c7bdec gitignore: add jetbrains 2021-09-12 18:08:43 +02:00
Aaron Bieber
203e6bc6b2 Remove trace lines about NO_COLOR. 2021-09-12 07:30:35 -06:00
Juan Font
e27753e46e Merge pull request #103 from juanfont/shared-nodes
Add support for sharing nodes across namespaces
2021-09-11 23:31:37 +02:00
Juan Font
11fbef4bf0 Added extra timeout 2021-09-11 23:21:45 +02:00
Juan Font
c4e6ad1ec7 Fixed some typos 2021-09-10 00:52:08 +02:00
Juan Font
263a3f1983 Merge branch 'main' into shared-nodes 2021-09-10 00:49:50 +02:00
Juan Font
8acaea0fbe Increased timeout 2021-09-10 00:44:27 +02:00
Juan Font
bd6adfaec6 Changes a few more variables 2021-09-10 00:37:01 +02:00
Juan Font
4b4a5a4b93 Update sharing.go
Co-authored-by: Kristoffer Dalby <kradalby@kradalby.no>
2021-09-10 00:32:42 +02:00
Juan Font
b098d84557 Apply suggestions from code review
Changed more variable names

Co-authored-by: Kristoffer Dalby <kradalby@kradalby.no>
2021-09-10 00:32:06 +02:00
Juan Font
b937f9b762 Update machine.go
Added comment on toNode
2021-09-10 00:30:02 +02:00
Juan Font
55f3e07bd4 Apply suggestions from code review
Removed one letter variables

Co-authored-by: Kristoffer Dalby <kradalby@kradalby.no>
2021-09-10 00:26:46 +02:00
Juan Font
2780623076 Renamed SharedNode to SharedMachine 2021-09-06 14:43:43 +02:00
Juan Font
75a342f96e Renamed files 2021-09-06 14:40:37 +02:00
Juan Font
729cd54401 Renamed sharing function 2021-09-06 14:39:52 +02:00
Juan Font
a023f51971 Merge pull request #101 from SilverBut/main
fix: check last seen time without possible null pointer
2021-09-03 10:35:49 +02:00
Juan Font
5076eb9215 Merge pull request #102 from SilverBut/patch-1
docs: add notes on how to build own DERP server
2021-09-03 10:24:32 +02:00
Juan Font
7edd0cd14c Added add node cli 2021-09-03 10:23:45 +02:00
Juan Font
7ce4738d8a Preload namespace so the name can be shown 2021-09-03 10:23:26 +02:00
Juan Font
7287e0259c Minor linting issues 2021-09-02 17:08:39 +02:00
Juan Font
d86de68b40 Show namespace in node list table 2021-09-02 17:06:47 +02:00
Juan Font
4ba107a765 README updated 2021-09-02 17:00:46 +02:00
Juan Font
187b016d09 Added helper function to get list of shared nodes 2021-09-02 16:59:50 +02:00
Juan Font
7010f5afad Added unit tests on sharing nodes 2021-09-02 16:59:12 +02:00
Juan Font
48b73fa12f Implement node sharing functionality 2021-09-02 16:59:03 +02:00
Juan Font
1ecd0d7ca4 Added DB SharedNode model to support sharing nodes 2021-09-02 16:57:26 +02:00
Silver Bullet
6faaae0c5f docs: add notes on how to build own DERP server
The official doc is hidden under a bunch of issues. Add a doc link here and hope it could be helpful.
2021-09-02 06:08:12 +08:00
Silver Bullet
e4ef65be76 fix: check last seen time without possible null pointer 2021-09-02 05:44:42 +08:00
Juan Font
39c661d408 Merge pull request #99 from juanfont/explicit-ubuntu-version
Use explicit version in Dockerfile
2021-08-26 21:18:16 +02:00
Juan Font
91a48d6a43 Update Dockerfile
Use explicit version in Dockerfile (addresses #95)
2021-08-26 10:23:45 +02:00
Kristoffer Dalby
123f0fa185 Merge pull request #98 from kradalby/initial-dns-server-exit-node 2021-08-25 22:58:25 +01:00
Kristoffer Dalby
ba3dffecbf Update readme 2021-08-25 19:05:10 +01:00
Kristoffer Dalby
8735e5675c Add a test for the getdnsconfig function 2021-08-25 19:03:04 +01:00
Kristoffer Dalby
3f5e06a0f8 Dont add the portnumber to the ip 2021-08-25 18:43:13 +01:00
Juan Font
ba40a40b73 Merge pull request #96 from qbit/version_fix
Fix setting of version
2021-08-25 12:34:34 +02:00
Kristoffer Dalby
b3732e7fb9 Add nameserver as resolver aswell 2021-08-25 07:04:48 +01:00
Aaron Bieber
104776ee84 fix setting of version 2021-08-24 07:49:15 -06:00
Kristoffer Dalby
01e781e546 Pass DNSConfig to nodes in MapResponse 2021-08-24 07:11:45 +01:00
Kristoffer Dalby
e77c16b55a Add DNSConfig to example and setup test 2021-08-24 07:10:09 +01:00
Kristoffer Dalby
987bbee1db Add DNSConfig field to configuration 2021-08-24 07:09:47 +01:00
Juan Font
74d2fe1baa Merge pull request #84 from kradalby/integration-tests-ci
Improve logic to keep nodes up to date with the network state
2021-08-23 09:42:07 +02:00
Kristoffer Dalby
98e63d5561 Merge pull request #94 from kradalby/split-lint-test
Split lint and test CI files
2021-08-23 07:46:11 +01:00
Kristoffer Dalby
059f13fc9d Add missing comment for stream function 2021-08-23 07:38:14 +01:00
Kristoffer Dalby
ebd27b46af Add comment to updatemachine 2021-08-23 07:35:44 +01:00
Juan Font
ca8d814918 Merge pull request #92 from kradalby/enhance-route-command
Enhance route command with ptables and multiple routes
2021-08-22 12:48:30 +02:00
Kristoffer Dalby
0aeeaac361 Always load machine object from DB before save/modify
We are currently holding Machine objects in memory for a long time,
while waiting for stream/longpoll, this might make us end up with stale
objects, that we just call save on, potentially overwriting stuff in
the database.

A typical scenario would be someone changing something from the CLI,
e.g. enabling routes, which in turn is overwritten again by the stale
object in the longpolling function.

The code has been left with TODO's and a discussion is available in #93.
2021-08-21 16:52:19 +01:00
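
A hypothetical sketch of the refresh-before-save pattern this commit moves toward, using GORM v2; the `Machine` stand-in and helper are illustrative, not headscale's actual code:

```go
package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

// Machine is a minimal stand-in for headscale's model (illustrative only).
type Machine struct {
	ID   uint
	Name string
}

// refreshAndSave reloads the row by primary key before mutating and saving,
// so a stale in-memory copy cannot overwrite newer database state.
func refreshAndSave(db *gorm.DB, id uint, mutate func(*Machine)) error {
	var fresh Machine
	if err := db.First(&fresh, id).Error; err != nil {
		return err
	}
	mutate(&fresh)
	return db.Save(&fresh).Error
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	db.AutoMigrate(&Machine{})
	db.Create(&Machine{Name: "stale-node"})

	// Apply the change through a fresh copy, not a long-lived object.
	if err := refreshAndSave(db, 1, func(m *Machine) { m.Name = "fresh-node" }); err != nil {
		panic(err)
	}
}
```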
Kristoffer Dalby
28ed8a5742 Actually rename lint 2021-08-21 15:42:23 +01:00
Kristoffer Dalby
f749be1490 Split lint and test CI files
This commit splits the lint and test steps into two different jobs in
github actions.

Consider this a suggestion, the idea is that when we look at PRs we will
see explicitly which one of the two types of checks fails without having
to open Github actions.
2021-08-21 15:40:27 +01:00
Kristoffer Dalby
693bce1b10 Update test machine name properly 2021-08-21 15:35:26 +01:00
Kristoffer Dalby
4f97e077db Add --all flag to routes enable command to enable all advertised routes 2021-08-21 15:04:30 +01:00
Kristoffer Dalby
c883e79884 Enhance route command with ptables and multiple routes
This commit rewrites the `routes list` command to use ptables to present
a slightly nicer list, including a new field if the route is enabled or
not (which is quite useful).

In addition, it reworks the enable command to support enabling multiple
routes (not only one route as per removed TODO). This allows users to
actually take advantage of exit-nodes and subnet relays.
2021-08-21 14:49:46 +01:00
ohdearaugustin
a613501ff2 Update .github/workflows/release.yml
Fix typo

Co-authored-by: Kristoffer Dalby <kradalby@kradalby.no>
2021-08-21 11:17:21 +02:00
Kristoffer Dalby
a054e2514a Keep tailscale count at 25 in integration tests 2021-08-21 09:26:18 +01:00
Kristoffer Dalby
c49fe26da7 Code clean up, loglevel debug for integration tests 2021-08-21 09:15:16 +01:00
ohdearaugustin
75afdc6306 github/workflows: remove version tag 2021-08-20 20:10:34 +02:00
ohdearaugustin
f02beaf075 github/workflows: add checkout 2021-08-20 19:45:01 +02:00
ohdearaugustin
8bcc7e88f0 github/workflows: add dispatch 2021-08-20 19:37:15 +02:00
ohdearaugustin
0adbd720bf github/workflows: add docker release 2021-08-20 19:15:20 +02:00
Kristoffer Dalby
d93a7f2e02 Make Info default log level 2021-08-20 17:15:07 +01:00
Kristoffer Dalby
88d7ac04bf Account for race condition in deleting/closing update channel
This commit tries to address the possible race condition that can happen
if a client closes its connection after we have fetched it from the
syncmap but before we send the message.

To avoid introducing new deadlock conditions, all messages sent
to the update channel have been moved into a function which handles the
locking (instead of it being done all over the place).

The same lock is used around the delete/close function.
2021-08-20 16:52:34 +01:00
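
A minimal sketch of the locking pattern described above; the type and method names are hypothetical:

```go
package main

import "sync"

// updateHub guards sending on and closing a client's update channel with
// the same mutex, so a send can never race a close.
type updateHub struct {
	mu       sync.Mutex
	channels map[string]chan struct{}
}

func (h *updateHub) notify(machineKey string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if ch, ok := h.channels[machineKey]; ok {
		select {
		case ch <- struct{}{}:
		default: // receiver busy: drop rather than block while holding the lock
		}
	}
}

func (h *updateHub) closeAndDelete(machineKey string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if ch, ok := h.channels[machineKey]; ok {
		close(ch)
		delete(h.channels, machineKey)
	}
}

func main() {
	hub := &updateHub{channels: map[string]chan struct{}{
		"mkey:abc": make(chan struct{}, 1),
	}}
	hub.notify("mkey:abc")
	hub.closeAndDelete("mkey:abc")
}
```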
Kristoffer Dalby
1f422af1c8 Save headscale logs if jobs fail 2021-08-20 16:50:55 +01:00
Kristoffer Dalby
53168d54d8 Make http timeout 30s instead of 10s 2021-08-19 22:29:03 +01:00
Kristoffer Dalby
b0ec945dbb Make lastStateChange namespaced 2021-08-19 18:19:26 +01:00
Kristoffer Dalby
48ef6e5a6f Rename keepAlive function, as it now does more things 2021-08-19 18:06:57 +01:00
Kristoffer Dalby
8d1adaaef3 Move isOutdated logic to updateChan consumation 2021-08-19 18:05:33 +01:00
Kristoffer Dalby
dd8c0d1e9e Move most "poll" functionality to poll.go
This commit moves more poll functions (including keepalive) to
poll.go so that they live in the same file.

In addition it makes changes to improve stability and ensure nodes
get the appropriate updates from the headscale control server and are not
left in an inconsistent state.

Two new additions are:

omitpeers=true will now trigger an update if the clients are not already up
to date

keepalive has been extended with a timer that will check every 120s whether
all nodes are up to date.
2021-08-18 23:24:22 +01:00
Kristoffer Dalby
57b79aa852 Set timeout, add lastupdate field
This commit makes two reasonably major changes:

Set a default timeout for the go HTTP server (which gin uses), which
allows us to actually have broken long poll sessions fail so we can have
the client re-establish them.
The current 10s number is chosen randomly, and we need more testing to
ensure that the feature works as intended.

The second is adding a last updated field to keep track of the last time
we had an update that needs to be propagated to all of our
clients/nodes. This will be used to keep track of our machines and if
they are up to date or need us to push an update.
2021-08-18 23:21:11 +01:00
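
A minimal sketch of giving the Go HTTP server (which wraps the gin router in headscale's case) an explicit timeout; the address and handler are placeholders, and the 10s value mirrors the commit message:

```go
package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:    ":8080",
		Handler: http.DefaultServeMux, // headscale would pass its gin engine here
		// Timeouts make broken long-poll sessions fail instead of hanging
		// forever, so clients can re-establish them.
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
	}
	log.Fatal(srv.ListenAndServe())
}
```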
Kristoffer Dalby
2f883410d2 Add lastUpdate field to machine, add function to issue messages on update channel
This commit adds a new field to machine, lastSuccessfulUpdate, which
tracks when we were last able to send a proper map update to the node. The
purpose of this is to be able to compare against a "global" last-updated time
and determine if we need to send an update map request to a node.

In addition it allows us to create a scheduled check to see if all known
nodes are up to date.

Also, add a helper function to send a message to the update channel of a
machine.
2021-08-18 23:17:38 +01:00
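
A sketch of the freshness check this field enables; the function name is hypothetical:

```go
package main

import (
	"fmt"
	"time"
)

// isOutdated reports whether a node needs a new map update: its last
// successful update predates the last global state change.
func isOutdated(lastSuccessfulUpdate, lastStateChange time.Time) bool {
	return lastSuccessfulUpdate.Before(lastStateChange)
}

func main() {
	lastUpdate := time.Now().Add(-2 * time.Minute)
	lastChange := time.Now().Add(-1 * time.Minute)
	fmt.Println("node outdated:", isOutdated(lastUpdate, lastChange)) // true
}
```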
Kristoffer Dalby
6fa61380b2 Up client count, make arguments more explicit and clean up unused assignments 2021-08-18 23:17:09 +01:00
Juan Font
47b61c0cea Merge pull request #86 from juanfont/better-ui
Improve tables in CLI
2021-08-16 09:33:47 +02:00
Juan Font
d739ac830f Merge pull request #87 from juanfont/fix-route-notify
Send notifications when enabling a route
2021-08-16 09:25:31 +02:00
Juan Font
26024fedc7 Merge branch 'main' into fix-route-notify 2021-08-16 00:29:38 +02:00
Juan Font
a376b697c0 Send notifications when enabling a route 2021-08-16 00:17:26 +02:00
Juan Font
bc2574680d Linting 2021-08-15 23:35:03 +02:00
Juan Font
f194b41435 Better table in preauthkeys 2021-08-15 23:29:55 +02:00
Juan Font
350f7da55d Better table in namespaces 2021-08-15 23:20:38 +02:00
Juan Font
36f5f78f46 pterm dependency 2021-08-15 23:10:50 +02:00
Juan Font
55fe5b0b41 Use pterm table in node list 2021-08-15 23:10:39 +02:00
Kristoffer Dalby
7d1a5c00a0 Try with longer timeout 2021-08-13 16:56:28 +01:00
Kristoffer Dalby
036061664e initial integration test file 2021-08-13 16:12:01 +01:00
Kristoffer Dalby
5b1b40ce93 Merge pull request #83 from kradalby/more-integration-tests
Improve reliability of PollMapHandler, more integration tests
2021-08-13 16:05:32 +01:00
Kristoffer Dalby
a8d9fdce3c Uncomment ping test 2021-08-13 11:01:23 +01:00
Kristoffer Dalby
700382cba4 Split stream part of pollhandlermap into its own func 2021-08-13 10:33:50 +01:00
Kristoffer Dalby
9698abbfd5 Resolve merge conflict 2021-08-13 10:33:19 +01:00
Juan Font
5bfcf5c917 Merge pull request #82 from juanfont/really-expire-ephemeral
Also notify peers when deleting ephemerals
2021-08-12 22:05:53 +02:00
Juan Font
8eb7d47072 Fixed linting 2021-08-12 21:57:20 +02:00
Juan Font
ab61c87701 Also notify peers when deleting ephemerals 2021-08-12 21:53:37 +02:00
Juan Font
c1e6157847 Expire ephemeral is internal 2021-08-12 21:45:40 +02:00
Juan Font
4c849539fc Expire the ephemeral nodes in the Serve method 2021-08-12 21:44:12 +02:00
Juan Font
9c2a630055 Merge pull request #81 from kradalby/integration-tests
Add Integration tests
2021-08-12 11:15:45 +02:00
Kristoffer Dalby
0e1ddf9715 Set longer timeout for integration tests 2021-08-12 07:36:38 +01:00
Kristoffer Dalby
54da1a4155 Commit the correct integration etc files 2021-08-12 07:05:26 +01:00
Kristoffer Dalby
7141e2ed70 Fix hostname passed to join command 2021-08-11 17:12:39 +01:00
Kristoffer Dalby
c9e5048015 Merge remote-tracking branch 'upstream/main' into integration-tests 2021-08-08 17:57:28 +01:00
Kristoffer Dalby
4e077b053c Initial work, add integration tests
This commit adds integration tests to headscale. They are currently
quite simple, but they lay the groundwork for more comprehensive testing
and for ensuring we don't break things with the official tailscale client.

The test works by leveraging Docker (via dockertest) to spin up a
Headscale container and a number of tailscale containers (10).

Each tailscale container is joined to the headscale and then "passed on"
to the tests.

Currently three tests have been implemented:

- Have all tailscale containers join headscale (in the setup process)
- Get IP from each container (I plan to extend this with cross-ping)
- List nodes with headscale's CLI and verify all have been registered

This test depends on Docker and, currently, I have not looked into
hooking it into Github Actions.
2021-08-08 17:50:32 +01:00
Kristoffer Dalby
f973aef80c Add Dockerfile to build tailscale docker image for integration tests 2021-08-08 17:43:06 +01:00
Kristoffer Dalby
a43bb1bb40 Improve Dockerfile
This commit makes several changes to the dockerfile:

- Add go.mod and go.sum in a separate stage, subsequently calling `go
  mod download` to make it cache dependencies and speed up builds
- Use ubuntu:latest (28MB larger) instead of scratch, which makes the image a
  lot easier to debug (e.g. it has a shell and a package manager)
- Change ENTRYPOINT to CMD; this makes the behaviour of the image
  slightly different from a CLI perspective, but makes interacting with
  the image from code, docker-compose and kubernetes easier.
2021-08-08 17:39:39 +01:00
Kristoffer Dalby
d86123195c Add a dockerignore file to speed up builds and make caching better 2021-08-08 17:38:44 +01:00
Kristoffer Dalby
91ffd10192 Remove "Keys: " from create auth key output
This is based on the premise that "the user knows what command they
executed" and therefore knows that the output is the key.

This makes the command a lot more useful in scripts.
2021-08-08 17:37:23 +01:00
Kristoffer Dalby
642c7824a7 Add trace log for machine failing to parse ip in toNode 2021-08-08 17:37:04 +01:00
Kristoffer Dalby
149279f3d5 Add health endpoint
Allow us to tell when the server is up and running and can answer
requests
2021-08-08 17:36:25 +01:00
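
A minimal sketch of such an endpoint with gin (headscale's HTTP framework); the exact route and payload here are assumptions:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	// Answer 200 once the server is up and able to serve requests.
	r.GET("/health", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"healthy": "ok"})
	})
	r.Run(":8080")
}
```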
Juan Font
275214920f Merge pull request #80 from juanfont/delete-pak
Add CLI command to mark preauthkeys as expired
2021-08-08 10:52:18 +02:00
Juan Font
0124899759 fixed linting x 2 2021-08-08 00:14:10 +02:00
Juan Font
033136cb9a fixed linting 2021-08-08 00:13:44 +02:00
Juan Font
05e08e0ac7 Added cmd to expire preauth keys (requested in #78) 2021-08-08 00:10:30 +02:00
Juan Font
226cb89d97 Added func to expire PAKs 2021-08-07 23:57:52 +02:00
Juan Font
3007c0ec4f Merge pull request #79 from felixonmars/patch-1
Correct a typo in routes.go
2021-08-07 20:02:16 +02:00
Felix Yan
3fa1ac9c79 Correct a typo in routes.go 2021-08-08 01:52:01 +08:00
Juan Font
bb2ccfddd9 Merge pull request #77 from kradalby/deadlierlocks
Remove more deadlocks
2021-08-07 01:05:01 +02:00
Kristoffer Dalby
99fd126219 Remove unused mutex 2021-08-06 21:11:38 +01:00
Kristoffer Dalby
15b8c8f4c5 Remove lock from keepAlive 2021-08-06 20:08:51 +01:00
Kristoffer Dalby
4243885246 Rewrite old lock error msg 2021-08-06 20:03:25 +01:00
Kristoffer Dalby
5bc5c5dc1b Remove forgotten lock 2021-08-06 20:02:47 +01:00
Juan Font
db4f49901e Merge pull request #76 from kradalby/no-color-logs
Try to detect color support, make color configurable
2021-08-06 08:40:54 +02:00
Kristoffer Dalby
73a00c89ff Try to detect color support, make color configurable
This commit tries to detect whether users can render colors in their terminal
and only enables color logs if that is true.

It also adds support for no-color.org's NO_COLOR env var, allowing colors to
be disabled.
2021-08-06 07:29:57 +01:00
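
A sketch of the detection logic; the `go-isatty` dependency is an assumption, while `NO_COLOR` follows the no-color.org convention:

```go
package main

import (
	"os"

	"github.com/mattn/go-isatty"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// Colors only when stdout is a terminal and NO_COLOR is unset.
	_, noColorSet := os.LookupEnv("NO_COLOR")
	colorOk := isatty.IsTerminal(os.Stdout.Fd()) && !noColorSet

	log.Logger = log.Output(zerolog.ConsoleWriter{
		Out:     os.Stdout,
		NoColor: !colorOk,
	})
	log.Info().Msg("logger configured")
}
```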
Juan Font
8a614dabc0 Headscale is from no-juan 2021-08-06 00:23:07 +02:00
Juan Font
c95cf15731 Fixed log message 2021-08-06 00:21:34 +02:00
Juan Font
e7ce902f9d Merge pull request #75 from kradalby/syncmap
Fix deadlock issue
2021-08-06 00:19:34 +02:00
Juan Font
d421c7b665 Merge pull request #74 from kradalby/deadlock-logging
Switch to a structured logger
2021-08-06 00:18:40 +02:00
Kristoffer Dalby
1abc68ccf4 Removes locks causing deadlock
This commit removes most of the locks in the PollingMap handler, as there
were combinations that caused deadlocks. Instead of using a plain map and
doing the locking ourselves, we use sync.Map, which handles it for us.
2021-08-05 22:14:37 +01:00
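
A minimal sketch of the `sync.Map` pattern this commit switches to; keys and values are illustrative:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// sync.Map does its own locking, so there is no lock ordering for
	// concurrent pollers to deadlock on.
	var clients sync.Map // machine key -> update channel

	clients.Store("mkey:abc", make(chan struct{}, 1))
	if ch, ok := clients.Load("mkey:abc"); ok {
		fmt.Printf("found channel: %T\n", ch)
	}
	clients.Delete("mkey:abc")
}
```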
Kristoffer Dalby
575b15e5fa Add more trace logging 2021-08-05 21:47:06 +01:00
Kristoffer Dalby
a8c8a358d0 Make log keys lowercase 2021-08-05 20:57:47 +01:00
Kristoffer Dalby
cd2ca137c0 Make log_level user configurable 2021-08-05 19:19:25 +01:00
Kristoffer Dalby
0660867a16 Correct url 2021-08-05 18:58:15 +01:00
Kristoffer Dalby
b1200140b8 Convert cli/utils.go 2021-08-05 18:26:49 +01:00
Kristoffer Dalby
d10b57b317 Convert namespaces.go 2021-08-05 18:23:02 +01:00
Kristoffer Dalby
42bf566fff Convert acls.go 2021-08-05 18:18:18 +01:00
Kristoffer Dalby
0bb2fabc6c Convert missing from api.go 2021-08-05 18:16:21 +01:00
Kristoffer Dalby
ee704f8ef3 Initial port to zerologger 2021-08-05 18:11:26 +01:00
Juan Font
4aad3b7933 Improved README.md on ip_prefix 2021-08-03 20:38:23 +02:00
Juan Font
6091373b53 Merge pull request #63 from juanfont/use-kv-for-updates
Added communication between Serve and CLI using KV table
2021-08-03 20:30:33 +02:00
Juan Font
3879120967 Merge pull request #72 from kradalby/ip-pool
Make IP Prefix configurable and available ip deterministic
2021-08-03 20:27:42 +02:00
Kristoffer Dalby
465669f650 Merge pull request #1 from kradalby/ip-pool-test
Fix empty ip issue and remove network/broadcast addresses
2021-08-03 10:12:09 +01:00
Kristoffer Dalby
ea615e3a26 Do not issue "network" or "broadcast" addresses (0 or 255) 2021-08-03 10:06:42 +01:00
Kristoffer Dalby
d3349aa4d1 Add test to ensure we can deal with empty ips from database 2021-08-03 09:26:28 +01:00
Kristoffer Dalby
73207decfd Check that IP is set before parsing
Machine is saved to the DB before it is assigned an IP, so we might have
empty IP fields coming back.
2021-08-03 07:42:11 +01:00
Kristoffer Dalby
eda6e560c3 debug logging 2021-08-02 22:51:50 +01:00
Kristoffer Dalby
95de823b72 Add test to ensure we can read back ips 2021-08-02 22:39:18 +01:00
Kristoffer Dalby
9f85efffd5 Update readme 2021-08-02 22:06:15 +01:00
Kristoffer Dalby
b5841c8a8b Rework getAvailableIp
This commit reworks getAvailableIp with a "simpler" version that will
look for the first available IP address in our IP prefix.

There are a couple of ideas behind this:

* Make the host IPs reasonably predictable and within similar
  subnets, which should simplify ACLs for subnets
* The code is not random, but deterministic, so we can have tests
* The code is a bit more understandable (no bit shift magic)
2021-08-02 21:57:45 +01:00
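
A sketch of a deterministic first-available allocator in this spirit, using `inet.af/netaddr` as the repo does (the `used` set is illustrative; a later commit, listed above, additionally skips the .0 and .255 addresses):

```go
package main

import (
	"fmt"

	"inet.af/netaddr"
)

// firstAvailable walks the prefix from its first address and returns the
// first IP not already in use: predictable and easy to test.
func firstAvailable(prefix netaddr.IPPrefix, used map[netaddr.IP]bool) (netaddr.IP, bool) {
	for ip := prefix.IP(); prefix.Contains(ip); ip = ip.Next() {
		if !used[ip] {
			return ip, true
		}
	}
	return netaddr.IP{}, false
}

func main() {
	prefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
	used := map[netaddr.IP]bool{
		netaddr.MustParseIP("100.64.0.0"): true, // pretend the base address is taken
	}
	ip, _ := firstAvailable(prefix, used)
	fmt.Println(ip) // 100.64.0.1
}
```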
Kristoffer Dalby
309f868a21 Make IP prefix configurable
This commit makes the IP prefix used to generate addresses configurable
by users. This can be useful if you would like to use a smaller range or
if your current setup overlaps with the default range.

The existing range is left as the default.
2021-08-02 20:06:26 +01:00
Juan Font
6c903d4a2f Fixed missing nodes cmd 2021-07-31 23:14:24 +02:00
Juan Font
c3aa9a5d4c Merge pull request #69 from juanfont/change-default-port
Use 8080 as default port in the example config
2021-07-31 11:47:15 +02:00
Juan Font Alonso
4fb55e1684 Use 8080 as default port, like in the Kubernetes yamls 2021-07-30 17:07:19 +02:00
Juan Font Alonso
91bfb481c1 Fix indentation 2021-07-30 16:42:26 +02:00
Juan Font
201ba109c3 Merge pull request #62 from ohdearaugustin/topic/refactor-config
Topic/refactor config
2021-07-30 16:40:38 +02:00
Juan Font
d3f965d493 Merge pull request #66 from juanfont/remove-old-docker
Remove old docker code
2021-07-28 13:43:58 +02:00
Juan Font
f832d7325b Merge pull request #67 from kradalby/patch-1
Fix typo in example
2021-07-27 19:58:15 +02:00
Kristoffer Dalby
b1d1bd32c3 Fix typo in example
The example command is missing the `s` in `preauthkeys`
2021-07-27 18:37:43 +01:00
Juan Font Alonso
df6d4de6fd Remove old docker code 2021-07-27 17:05:22 +02:00
Juan Font Alonso
461a893ee4 Added log message when sending updates 2021-07-25 20:47:51 +02:00
Juan Font Alonso
97f7c90092 Added communication between Serve and CLI using KV table (helps in #52) 2021-07-25 17:59:48 +02:00
ohdearaugustin
ea3043cdcb cmd: Add error check for Persistent Flags 2021-07-25 16:26:15 +02:00
ohdearaugustin
04dffcc4ae Refactor cli commands 2021-07-25 15:14:09 +02:00
ohdearaugustin
3a07360b6e Add root cmd 2021-07-25 15:10:34 +02:00
ohdearaugustin
b97d6f71b1 Refactor version cmd 2021-07-25 15:09:53 +02:00
ohdearaugustin
4915902e04 Refactor server cmd 2021-07-25 15:09:33 +02:00
ohdearaugustin
d87a4c87cc Refactor routes cmd 2021-07-25 15:08:40 +02:00
ohdearaugustin
e56755fd67 Refactor preauthkeys cmd 2021-07-25 15:07:27 +02:00
ohdearaugustin
2862c2034b Refactor nodes cmd 2021-07-25 15:04:06 +02:00
ohdearaugustin
53185eaa9e Refactor namespaces cmd 2021-07-25 15:03:45 +02:00
Juan Font
b83ecc3e6e Merge pull request #61 from ohdearaugustin/topic/refactor-cli-versionCmd
Refactor cmdVersion to cli package
2021-07-25 12:00:06 +02:00
Juan Font
04fdd94201 Merge pull request #60 from cure/tls-more-readme-changes
Add some more detail to the README about the different Let's Encrypt
2021-07-25 11:38:31 +02:00
ohdearaugustin
48ec51d166 Refactor cmdVersion to cli package 2021-07-25 02:02:05 +02:00
Ward Vandewege
3260362436 Add some more detail to the README about the different Let's Encrypt
validation methods.
2021-07-24 09:20:38 -04:00
Ward Vandewege
5f60671d12 Merge pull request #59 from qbit/tls_letsencrypt_listen
Add a 'tls_letsencrypt_listen' config option
2021-07-24 09:03:04 -04:00
Aaron Bieber
69d77f6e9d Add a 'tls_letsencrypt_listen' config option
Currently the default (and non-configurable) Let's Encrypt listener will
bind to all IPs. This isn't ideal if we want to run headscale on a specific
IP only.

This also allows for one to set the listener to something other than
port 80. This is useful for OSs like OpenBSD which only allow root to
bind the lower port ranges (and don't have `setcap`) as we can now run
`headscale` as a non-privileged user while still using the baked in ACME
magic. Obviously this configuration would also require a reverse proxy
or firewall rule to redirect traffic. I attempted to outline that in the
README change.
2021-07-23 16:12:01 -06:00
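
For illustration, a config fragment combining these options (hostname and port are placeholders); a firewall rule or reverse proxy would then forward port 80 to the listener below:

```
"tls_letsencrypt_hostname": "headscale.example.com",
"tls_letsencrypt_listen": "localhost:8081",
"tls_letsencrypt_challenge_type": "HTTP-01",
```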
Juan Font
1af9c11bdd Merge pull request #54 from juanfont/delete-nodes
Implement node deletion
2021-07-19 16:18:09 +02:00
Juan Font Alonso
57c115e60a Fix linting error: 2021-07-17 11:17:42 +02:00
Juan Font Alonso
96b4d2f391 Mark the machine as unregistered before soft delete 2021-07-17 11:12:24 +02:00
Juan Font Alonso
0f649aae8b Ask for confirmation before deleting 2021-07-17 11:09:42 +02:00
Juan Font
f491db232b Merge pull request #55 from cure/letsencrypt-more-flexible-config
Turn the combination of TLS-ALPN-01 and listen_addr on a port other than
2021-07-17 11:01:08 +02:00
Ward Vandewege
9a24340bd4 Turn the combination of TLS-ALPN-01 and listen_addr on a port other than
443 into a warning, not an error, refs #53.
2021-07-16 22:02:05 -04:00
Juan Font Alonso
39b756cf55 Fixed linting 2021-07-17 00:29:14 +02:00
Juan Font Alonso
9ca2ae7fc5 Implemented delete nodes (#52) 2021-07-17 00:23:12 +02:00
Juan Font Alonso
f3139d26c8 Added methods to delete nodes 2021-07-17 00:14:22 +02:00
Juan Font
6f20a1fc68 Merge pull request #51 from tianon/typo
Fix minor typo
2021-07-16 18:04:46 +02:00
Tianon Gravi
243b961cbe Fix minor typo
> Error: unknown command "namespace" for "headscale"
2021-07-16 15:07:13 +00:00
Juan Font Alonso
5748744134 Use ubuntu 18.04 as build env 2021-07-12 17:04:28 +02:00
Juan Font
31556e1ac0 Merge pull request #48 from juanfont/better-profile-info
Improving namespace/user support
2021-07-11 16:44:16 +02:00
Juan Font Alonso
0159649d0a Send the namespace name as user to the clients 2021-07-11 16:39:19 +02:00
Juan Font Alonso
cf9d920e4a Minor typo 2021-07-11 15:10:37 +02:00
Juan Font Alonso
7d46dfe012 Only load ACLs if a path is present 2021-07-11 15:10:11 +02:00
Juan Font Alonso
eabb1ce881 Fix minor typo on the register webpage 2021-07-11 15:05:32 +02:00
Juan Font Alonso
db20985b06 Show N/A in reusable when key is ephemeral 2021-07-11 13:14:25 +02:00
Juan Font Alonso
29b80e3ca1 Fix debug mode enabled by default in db 2021-07-11 13:13:36 +02:00
Juan Font Alonso
a16a763283 Update README.md with info on ACLs 2021-07-11 13:04:33 +02:00
Juan Font
ad7f03c9dd Merge pull request #47 from juanfont/handle-ephemeral-reconnect
Added HTTP responses on map errors
2021-07-11 11:41:23 +02:00
Juan Font Alonso
bff3d2d613 Added HTTP responses on errors 2021-07-11 11:37:17 +02:00
Juan Font
f66c283756 Merge pull request #46 from Teteros/update-derp-servers
Update DERP server definitions
2021-07-10 23:29:54 +02:00
Teteros
ad454d95b9 Update DERP server definitions 2021-07-10 09:00:35 +01:00
Juan Font
e67a98b758 Merge pull request #44 from juanfont/acls
Add support for Policy ACLs
2021-07-07 16:19:45 +02:00
Juan Font Alonso
ecf258f995 Use gorm connection pool 2021-07-04 21:56:13 +02:00
Juan Font Alonso
d4b27fd54b Merge branch 'main' into acls 2021-07-04 21:54:55 +02:00
Juan Font
90e9ad9a0e Merge pull request #45 from juanfont/reuse-gorm-connection
Use gorm connection pool
2021-07-04 21:51:43 +02:00
Juan Font Alonso
ff9d99b9ea Use gorm connection pool 2021-07-04 21:40:46 +02:00
Juan Font
7590dee1f2 Removed unnecessary prints 2021-07-04 13:47:59 +02:00
Juan Font
315bc6b677 Added acl path key in example config 2021-07-04 13:41:38 +02:00
Juan Font
a1b8f77b1b Fixed tests 2021-07-04 13:40:45 +02:00
Juan Font
19443669bf Fixed linting issues 2021-07-04 13:33:00 +02:00
Juan Font
d446e8a2fb More stuff in go.sum 2021-07-04 13:24:27 +02:00
Juan Font
202d6b506f Load ACL policy on headscale startup 2021-07-04 13:24:05 +02:00
Juan Font
401e6aec32 And more tests 2021-07-04 13:23:31 +02:00
Juan Font
bd86975d10 Added missing go.mod 2021-07-04 13:10:15 +02:00
Juan Font
d0e970f21d Added more unit tests 2021-07-04 13:01:41 +02:00
Juan Font
07e95393b3 Rule generation kinda working, missing tests 2021-07-04 12:35:18 +02:00
Juan Font
136aab9dc8 Work in progress in rule generation 2021-07-03 17:31:32 +02:00
Juan Font
bbd6a67c46 Added more acl test hujsons 2021-07-03 17:31:08 +02:00
Juan Font
31ea67bcaf Minor addenda to README.md 2021-07-03 16:10:22 +02:00
Juan Font
5644dadaf9 Added dependency on hujson 2021-07-03 12:02:46 +02:00
Juan Font
874aa4277d Minor changes in the README.md 2021-07-03 12:01:19 +02:00
Juan Font
b161a92e58 Initial work on ACLs 2021-07-03 11:55:32 +02:00
Juan Font
95fee5aa6f Merge pull request #43 from juanfont/use-plurals-for-cmds
Change all commands to plural words
2021-06-29 23:38:03 +02:00
Juan Font Alonso
f5b8a3f710 Make all commands a plural word 2021-06-28 20:04:05 +02:00
Juan Font
ba87ade9c5 Merge pull request #42 from juanfont/tailscale-1.8.x
Update Headscale to Tailscale 1.10
2021-06-26 18:36:46 +02:00
Juan Font Alonso
aa27709e60 Update code to Tailscale 1.10 2021-06-25 18:57:08 +02:00
Juan Font Alonso
736182f651 Update dependencies, including Tailscale 1.10.x 2021-06-25 18:56:49 +02:00
Juan Font
c4aa9d8aed Merge pull request #41 from juanfont/gorm2
Migrate to GORM 2.0
2021-06-25 10:00:13 +02:00
Juan Font Alonso
d8e0b16512 Do not apply the FK migrations on startup 2021-06-24 23:05:26 +02:00
Juan Font Alonso
d67be9ef58 go.mod updates 2021-06-24 15:49:27 +02:00
Juan Font Alonso
69ba750b38 Update Headscale to depend on gorm v2 2021-06-24 15:44:19 +02:00
Juan Font
df0d214faf Merge pull request #38 from cmars/k8s
Add k8s deployment, standalone app Dockerfile.
2021-06-21 21:18:41 +02:00
Juan Font
73186eeb2f Merge pull request #40 from cmars/upstream-fix-nodes-nil-lastseen
Fix nil dereference in nodes list command.
2021-06-20 11:12:10 +02:00
Casey Marshall
fdcd3bb574 Fix nil dereference in nodes list command.
Fixes a nil pointer dereference observed when listing nodes that have
not yet connected.

```
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0xb931a4]

goroutine 1 [running]:
github.com/juanfont/headscale/cmd/headscale/cli.glob..func8(0x13c93e0, 0xc0004c4220, 0x0, 0x2)
	/go/src/headscale/cmd/headscale/cli/nodes.go:74 +0x364
github.com/spf13/cobra.(*Command).execute(0x13c93e0, 0xc0004c41e0, 0x2, 0x2, 0x13c93e0, 0xc0004c41e0)
	/go/pkg/mod/github.com/spf13/cobra@v1.1.3/command.go:856 +0x2c2
github.com/spf13/cobra.(*Command).ExecuteC(0x13ca2e0, 0xc000497110, 0xe76416, 0x6)
	/go/pkg/mod/github.com/spf13/cobra@v1.1.3/command.go:960 +0x375
github.com/spf13/cobra.(*Command).Execute(...)
	/go/pkg/mod/github.com/spf13/cobra@v1.1.3/command.go:897
main.main()
	/go/src/headscale/cmd/headscale/headscale.go:89 +0x805
command terminated with exit code 2
```
2021-06-19 18:20:27 -05:00
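
A hypothetical sketch of the guard that avoids this panic: the last-seen timestamp is a `*time.Time` that stays nil until a machine first connects, so it must be checked before formatting:

```go
package main

import (
	"fmt"
	"time"
)

// lastSeenString renders a nullable timestamp safely.
func lastSeenString(lastSeen *time.Time) string {
	if lastSeen == nil {
		return "never" // machine has not connected yet
	}
	return lastSeen.Format(time.RFC3339)
}

func main() {
	var never *time.Time
	now := time.Now()
	fmt.Println(lastSeenString(never)) // "never"
	fmt.Println(lastSeenString(&now))
}
```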
Casey Marshall
c64d756ea7 Add k8s deployment, standalone app Dockerfile.
Tested with Rancher k3s. See k8s/README.md for site configuration and
deployment instructions.

Add cert-manager, tls, remote headscale script.
2021-06-18 12:45:21 -05:00
Juan Font
a63fb6b007 Update README.md on how to clear tailscaled data (#37) 2021-06-17 14:22:38 +02:00
89 changed files with 6355 additions and 1239 deletions

.dockerignore (new file, 17 lines)

@@ -0,0 +1,17 @@
// integration tests are not needed in docker
// ignoring it let us speed up the integration test
// development
integration_test.go
integration_test/
Dockerfile*
docker-compose*
.dockerignore
.goreleaser.yml
.git
.github
.gitignore
README.md
LICENSE
.vscode

.github/workflows/lint.yml (new file, 39 lines)

@@ -0,0 +1,39 @@
name: CI

on: [push, pull_request]

jobs:
  # The "build" workflow
  lint:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      # Install and run golangci-lint as a separate step, it's much faster this
      # way because this action has caching. It'll get run again in `make lint`
      # below, but it's still much faster in the end than installing
      # golangci-lint manually in the `Run lint` step.
      - uses: golangci/golangci-lint-action@v2
        with:
          args: --timeout 5m

      # Setup Go
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: "1.16.3" # The Go version to download (if necessary) and use.

      # Install all the dependencies
      - name: Install dependencies
        run: |
          go version
          go install golang.org/x/lint/golint@latest
          sudo apt update
          sudo apt install -y make

      - name: Run lint
        run: make lint

.github/workflows/release.yml

@@ -1,13 +1,15 @@
-name: goreleaser
+---
+name: release
 on:
   push:
     tags:
       - "*" # triggers only if push new tag version
+  workflow_dispatch:
 jobs:
   goreleaser:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04 # due to CGO we need to use an older version
     steps:
       -
         name: Checkout
@@ -27,4 +29,49 @@ jobs:
           version: latest
           args: release --rm-dist
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  docker-release:
+    runs-on: ubuntu-latest
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      -
+        name: Docker meta
+        id: meta
+        uses: docker/metadata-action@v3
+        with:
+          # list of Docker images to use as base name for tags
+          images: |
+            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
+            ghcr.io/${{ github.repository_owner }}/headscale
+          tags: |
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}
+            type=sha
+      -
+        name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      -
+        name: Login to GHCR
+        uses: docker/login-action@v1
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      -
+        name: Build and push
+        id: docker_build
+        uses: docker/build-push-action@v2
+        with:
+          push: true
+          context: .
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}

.github/workflows/test-integration.yml (new file, 23 lines)

@@ -0,0 +1,23 @@
name: CI

on: [pull_request]

jobs:
  # The "build" workflow
  integration-test:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2

      # Setup Go
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: "1.16.3"

      - name: Run Integration tests
        run: go test -tags integration -timeout 30m

.github/workflows/test.yml

@@ -10,36 +10,24 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
-      # Install and run golangci-lint as a separate step, it's much faster this
-      # way because this action has caching. It'll get run again in `make lint`
-      # below, but it's still much faster in the end than installing
-      # golangci-lint manually in the `Run lint` step.
-      - uses: golangci/golangci-lint-action@v2
-        with:
-          args: --timeout 2m
       # Setup Go
       - name: Setup Go
         uses: actions/setup-go@v2
         with:
-          go-version: '1.16.3' # The Go version to download (if necessary) and use.
+          go-version: "1.16.3" # The Go version to download (if necessary) and use.
       # Install all the dependencies
       - name: Install dependencies
         run: |
           go version
-          go install golang.org/x/lint/golint@latest
           sudo apt update
           sudo apt install -y make
-      - name: Run lint
-        run: make lint
       - name: Run tests
         run: make test
       - name: Run build
         run: make

.gitignore (6 lines changed)

@@ -18,3 +18,9 @@
 config.json
 *.key
 /db.sqlite
+*.sqlite3
+# Exclude Jetbrains Editors
+.idea
+test_output/

.goreleaser.yml

@@ -62,8 +62,7 @@ archives:
       - linux-armhf
       - linux-amd64
     name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
-    format: zip
-    # wrap_in_directory: true
+    format: binary

 checksum:
   name_template: 'checksums.txt'

Dockerfile (new file, 19 lines)

@@ -0,0 +1,19 @@
FROM golang:1.17.1-bullseye AS build
ENV GOPATH /go
COPY go.mod go.sum /go/src/headscale/
WORKDIR /go/src/headscale
RUN go mod download
COPY . /go/src/headscale
RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN test -e /go/bin/headscale
FROM ubuntu:20.04
COPY --from=build /go/bin/headscale /usr/local/bin/headscale
ENV TZ UTC
EXPOSE 8080/tcp
CMD ["headscale"]

Dockerfile.tailscale (new file, 11 lines)

@@ -0,0 +1,11 @@
FROM ubuntu:latest
ARG TAILSCALE_VERSION
RUN apt-get update \
&& apt-get install -y gnupg curl \
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
&& apt-get update \
&& apt-get install -y tailscale=${TAILSCALE_VERSION} \
&& rm -rf /var/lib/apt/lists/*

Makefile

@@ -2,13 +2,16 @@
 version = $(shell ./scripts/version-at-commit.sh)

 build:
-	go build -ldflags "-s -w -X main.version=$(version)" cmd/headscale/headscale.go
+	go build -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.version=$(version)" cmd/headscale/headscale.go

 dev: lint test build

 test:
 	@go test -coverprofile=coverage.out ./...

+test_integration:
+	go test -tags integration -timeout 30m ./...
+
 coverprofile_func:
 	go tool cover -func=coverage.out

@@ -17,7 +20,7 @@ coverprofile_html:

 lint:
 	golint
-	golangci-lint run
+	golangci-lint run --timeout 5m

 compress: build
 	upx --brute headscale

README.md (207 lines changed)

@@ -2,7 +2,7 @@
 [![Join the chat at https://gitter.im/headscale-dev/community](https://badges.gitter.im/headscale-dev/community.svg)](https://gitter.im/headscale-dev/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) ![ci](https://github.com/juanfont/headscale/actions/workflows/test.yml/badge.svg)

-An open source implementation of the Tailscale coordination server.
+An open source, self-hosted implementation of the Tailscale coordination server.
## Overview
@@ -10,85 +10,147 @@ Tailscale is [a modern VPN](https://tailscale.com/) built on top of [Wireguard](
 Everything in Tailscale is Open Source, except the GUI clients for proprietary OS (Windows and macOS/iOS), and the 'coordination/control server'.

-The control server works as an exchange point of cryptographic public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.
+The control server works as an exchange point of Wireguard public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.

 Headscale implements this coordination server.

 ## Status

-- [x] Basic functionality (nodes can communicate with each other)
+- [x] Base functionality (nodes can communicate with each other)
 - [x] Node registration through the web flow
-- [x] Network changes are relied to the nodes
-- [x] ~~Multiuser~~ Namespace support
-- [x] Basic routing (advertise & accept)
-- [ ] Share nodes between ~~users~~ namespaces
-- [x] Node registration via pre-auth keys (including reusable keys and ephemeral node support)
-- [X] JSON-formatted output
-- [ ] ACLs
-- [ ] DNS
-
-... and probably lots of stuff missing
+- [x] Network changes are relayed to the nodes
+- [x] Namespace support (~equivalent to multi-user in Tailscale.com)
+- [x] Routing (advertise & accept, including exit nodes)
+- [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
+- [x] JSON-formatted output
+- [x] ACLs
+- [x] Taildrop (File Sharing)
+- [x] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
+- [x] DNS (passing DNS servers to nodes)
+- [x] Share nodes between ~~users~~ namespaces
+- [ ] MagicDNS / Smart DNS

 ## Roadmap 🤷

-Basic multiuser support (multinamespace, actually) is now implemented. No node sharing or ACLs between namespaces yet though...
-
 Suggestions/PRs welcomed!
## Running it
-1. Download the Headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH
+1. Download the Headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH, or use the docker container
+
+```shell
+docker pull headscale/headscale:x.x.x
+```
+
+<!--
+or
+```shell
+docker pull ghcr.io/juanfont/headscale:x.x.x
+``` -->

 2. (Optional, you can also use SQLite) Get yourself a PostgreSQL DB running

 ```shell
 docker run --name headscale -e POSTGRES_DB=headscale -e \
 POSTGRES_USER=foo -e POSTGRES_PASSWORD=bar -p 5432:5432 -d postgres
 ```

 3. Set some stuff up (headscale Wireguard keys & the config.json file)

 ```shell
 wg genkey > private.key
 wg pubkey < private.key > public.key # not needed

 # Postgres
 cp config.json.postgres.example config.json
 # or
 # SQLite
 cp config.json.sqlite.example config.json
 ```

 4. Create a namespace (a namespace is a 'tailnet', a group of Tailscale nodes that can talk to each other)

 ```shell
-headscale namespace create myfirstnamespace
+headscale namespaces create myfirstnamespace
 ```
+
+or docker (the db.sqlite mount is only needed if you use sqlite):
+
+```shell
+touch db.sqlite
+docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale namespaces create myfirstnamespace
+```
+
+or if your server is already running in docker:
+
+```shell
+docker exec <container_name> headscale namespaces create myfirstnamespace
+```

 5. Run the server

 ```shell
 headscale serve
 ```
+
+or docker (the db.sqlite mount is only needed if you use sqlite):
+
+```shell
+docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite -p 127.0.0.1:8000:8000 headscale/headscale:x.x.x headscale serve
+```
+
+6. If you used tailscale.com before in your nodes, make sure you clear the tailscaled data folder
+
+```shell
+systemctl stop tailscaled
+rm -fr /var/lib/tailscale
+systemctl start tailscaled
+```

-6. Add your first machine
+7. Add your first machine

 ```shell
 tailscale up -login-server YOUR_HEADSCALE_URL
 ```

-7. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.
+8. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.

-8. In the server, register your machine to a namespace with the CLI
+9. In the server, register your machine to a namespace with the CLI

 ```shell
 headscale -n myfirstnamespace node register YOURMACHINEKEY
 ```
+
+or docker:
+
+```shell
+docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
+```
+
+or if your server is already running in docker:
+
+```shell
+docker exec <container_name> headscale -n myfirstnamespace node register YOURMACHINEKEY
+```

 Alternatively, you can use Auth Keys to register your machines:

 1. Create an authkey

 ```shell
-headscale -n myfirstnamespace preauthkey create --reusable --expiration 24h
+headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
 ```
+
+or docker:
+
+```shell
+docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derp.yaml:/derp.yaml -v $(pwd)/db.sqlite:/db.sqlite headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
+```
+
+or if your server is already running in docker:
+
+```shell
+docker exec <container_name> headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
+```
2. Use the authkey from your machine to register it
```shell
@@ -97,19 +159,25 @@ Alternatively, you can use Auth Keys to register your machines:
 If you create an authkey with the `--ephemeral` flag, that key will create ephemeral nodes. This implies that `--reusable` is true.

 Please bear in mind that all the commands from headscale support adding `-o json` or `-o json-line` to get a nicely JSON-formatted output.

 ## Configuration reference

 Headscale's configuration file is named `config.json` or `config.yaml`. Headscale will look for it in `/etc/headscale`, `~/.headscale` and finally the directory from where the Headscale binary is executed.

 ```
-"server_url": "http://192.168.1.12:8000",
-"listen_addr": "0.0.0.0:8000",
+"server_url": "http://192.168.1.12:8080",
+"listen_addr": "0.0.0.0:8080",
+"ip_prefix": "100.64.0.0/10"
 ```

-`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on.
+`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on. `ip_prefix` is the IP prefix (range) in which IP addresses for nodes will be allocated (default 100.64.0.0/10; e.g., 192.168.4.0/24, 10.0.0.0/8).
+
+```
+"log_level": "debug"
+```
+
+`log_level` sets the log level for Headscale. It defaults to `debug`, and the available levels are: `trace`, `debug`, `info`, `warn` and `error`.
```
"private_key_path": "private.key",
@@ -150,22 +218,43 @@ Headscale can be configured to expose its web service via TLS. To configure the
 ```
 "tls_letsencrypt_hostname": "",
+"tls_letsencrypt_listen": ":http",
 "tls_letsencrypt_cache_dir": ".cache",
 "tls_letsencrypt_challenge_type": "HTTP-01",
 ```

-To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) Headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed. The default challenge type HTTP-01 requires that Headscale listens on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, Headscale must be reachable via port 443, but port 80 is not required.
+To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) Headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.
+
+#### Challenge type HTTP-01
+
+The default challenge type `HTTP-01` requires that Headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, Headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.
+
+If you need to change the ip and/or port used by Headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running Headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.
+
+#### Challenge type TLS-ALPN-01
+
+Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, Headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.
+
+### Policy ACLs
+
+Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.
+
+For instance, instead of referring to users when defining groups you must
+use namespaces (which are the equivalent to user/logins in Tailscale.com).
+
+Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.
+
+### Apple devices
+
+An endpoint with information on how to connect your Apple devices (currently macOS only) is available at `/apple` on your running instance.
## Disclaimer
1. We have nothing to do with Tailscale, or Tailscale Inc.
2. The purpose of writing this was to learn how Tailscale works.
## More on Tailscale
- https://tailscale.com/blog/how-tailscale-works/
- https://tailscale.com/blog/tailscale-key-management/
- https://tailscale.com/blog/an-unlikely-database-migration/

acls.go (new file, 266 lines)

@@ -0,0 +1,266 @@
package headscale
import (
"encoding/json"
"fmt"
"io"
"os"
"strconv"
"strings"
"github.com/rs/zerolog/log"
"github.com/tailscale/hujson"
"inet.af/netaddr"
"tailscale.com/tailcfg"
)
const errorEmptyPolicy = Error("empty policy")
const errorInvalidAction = Error("invalid action")
const errorInvalidUserSection = Error("invalid user section")
const errorInvalidGroup = Error("invalid group")
const errorInvalidTag = Error("invalid tag")
const errorInvalidNamespace = Error("invalid namespace")
const errorInvalidPortFormat = Error("invalid port format")
// LoadACLPolicy loads the ACL policy from the specified path, and generates the ACL rules
func (h *Headscale) LoadACLPolicy(path string) error {
policyFile, err := os.Open(path)
if err != nil {
return err
}
defer policyFile.Close()
var policy ACLPolicy
b, err := io.ReadAll(policyFile)
if err != nil {
return err
}
err = hujson.Unmarshal(b, &policy)
if err != nil {
return err
}
if policy.IsZero() {
return errorEmptyPolicy
}
h.aclPolicy = &policy
rules, err := h.generateACLRules()
if err != nil {
return err
}
h.aclRules = rules
return nil
}
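// generateACLRules compiles the loaded ACL policy into tailcfg.FilterRule entries.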
func (h *Headscale) generateACLRules() (*[]tailcfg.FilterRule, error) {
rules := []tailcfg.FilterRule{}
for i, a := range h.aclPolicy.ACLs {
if a.Action != "accept" {
return nil, errorInvalidAction
}
r := tailcfg.FilterRule{}
srcIPs := []string{}
for j, u := range a.Users {
srcs, err := h.generateACLPolicySrcIP(u)
if err != nil {
log.Error().
Msgf("Error parsing ACL %d, User %d", i, j)
return nil, err
}
srcIPs = append(srcIPs, *srcs...)
}
r.SrcIPs = srcIPs
destPorts := []tailcfg.NetPortRange{}
for j, d := range a.Ports {
dests, err := h.generateACLPolicyDestPorts(d)
if err != nil {
log.Error().
Msgf("Error parsing ACL %d, Port %d", i, j)
return nil, err
}
destPorts = append(destPorts, *dests...)
}
rules = append(rules, tailcfg.FilterRule{
SrcIPs: srcIPs,
DstPorts: destPorts,
})
}
return &rules, nil
}
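// generateACLPolicySrcIP expands a Users entry (wildcard, group, tag,
// namespace, host, IP or CIDR) into the matching source IPs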
func (h *Headscale) generateACLPolicySrcIP(u string) (*[]string, error) {
return h.expandAlias(u)
}
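// generateACLPolicyDestPorts splits a Ports entry such as
// "tag:api-server:443" into its alias and port parts, then expands both
// into NetPortRanges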
func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRange, error) {
tokens := strings.Split(d, ":")
if len(tokens) < 2 || len(tokens) > 3 {
return nil, errorInvalidPortFormat
}
var alias string
// We can have here stuff like:
// git-server:*
// 192.168.1.0/24:22
// tag:montreal-webserver:80,443
// tag:api-server:443
// example-host-1:*
if len(tokens) == 2 {
alias = tokens[0]
} else {
alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
}
expanded, err := h.expandAlias(alias)
if err != nil {
return nil, err
}
ports, err := h.expandPorts(tokens[len(tokens)-1])
if err != nil {
return nil, err
}
dests := []tailcfg.NetPortRange{}
for _, d := range *expanded {
for _, p := range *ports {
pr := tailcfg.NetPortRange{
IP: d,
Ports: p,
}
dests = append(dests, pr)
}
}
return &dests, nil
}
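// expandAlias resolves an alias into IP addresses, trying in order: the
// wildcard "*", a group, a tag, a namespace, an entry in the Hosts section,
// a plain IP and, finally, a CIDR prefix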
func (h *Headscale) expandAlias(s string) (*[]string, error) {
if s == "*" {
return &[]string{"*"}, nil
}
if strings.HasPrefix(s, "group:") {
if _, ok := h.aclPolicy.Groups[s]; !ok {
return nil, errorInvalidGroup
}
ips := []string{}
for _, n := range h.aclPolicy.Groups[s] {
nodes, err := h.ListMachinesInNamespace(n)
if err != nil {
return nil, errorInvalidNamespace
}
for _, node := range *nodes {
ips = append(ips, node.IPAddress)
}
}
return &ips, nil
}
if strings.HasPrefix(s, "tag:") {
if _, ok := h.aclPolicy.TagOwners[s]; !ok {
return nil, errorInvalidTag
}
// This will have HORRIBLE performance.
// We need to change the data model to better store tags
machines := []Machine{}
if err := h.db.Where("registered").Find(&machines).Error; err != nil {
return nil, err
}
ips := []string{}
for _, m := range machines {
hostinfo := tailcfg.Hostinfo{}
if len(m.HostInfo) != 0 {
hi, err := m.HostInfo.MarshalJSON()
if err != nil {
return nil, err
}
err = json.Unmarshal(hi, &hostinfo)
if err != nil {
return nil, err
}
// FIXME: Check TagOwners allows this
for _, t := range hostinfo.RequestTags {
if s[4:] == t {
ips = append(ips, m.IPAddress)
break
}
}
}
}
return &ips, nil
}
n, err := h.GetNamespace(s)
if err == nil {
nodes, err := h.ListMachinesInNamespace(n.Name)
if err != nil {
return nil, err
}
ips := []string{}
for _, n := range *nodes {
ips = append(ips, n.IPAddress)
}
return &ips, nil
}
if h, ok := h.aclPolicy.Hosts[s]; ok {
return &[]string{h.String()}, nil
}
ip, err := netaddr.ParseIP(s)
if err == nil {
return &[]string{ip.String()}, nil
}
cidr, err := netaddr.ParseIPPrefix(s)
if err == nil {
return &[]string{cidr.String()}, nil
}
return nil, errorInvalidUserSection
}
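// expandPorts parses a port expression: "*" for all ports, or a
// comma-separated list of single ports ("22") and ranges ("5400-5500")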
func (h *Headscale) expandPorts(s string) (*[]tailcfg.PortRange, error) {
if s == "*" {
return &[]tailcfg.PortRange{{First: 0, Last: 65535}}, nil
}
ports := []tailcfg.PortRange{}
for _, p := range strings.Split(s, ",") {
rang := strings.Split(p, "-")
if len(rang) == 1 {
pi, err := strconv.ParseUint(rang[0], 10, 16)
if err != nil {
return nil, err
}
ports = append(ports, tailcfg.PortRange{
First: uint16(pi),
Last: uint16(pi),
})
} else if len(rang) == 2 {
start, err := strconv.ParseUint(rang[0], 10, 16)
if err != nil {
return nil, err
}
last, err := strconv.ParseUint(rang[1], 10, 16)
if err != nil {
return nil, err
}
ports = append(ports, tailcfg.PortRange{
First: uint16(start),
Last: uint16(last),
})
} else {
return nil, errorInvalidPortFormat
}
}
return &ports, nil
}

acls_test.go (new file)

@@ -0,0 +1,160 @@
package headscale
import (
"gopkg.in/check.v1"
)
func (s *Suite) TestWrongPath(c *check.C) {
err := h.LoadACLPolicy("asdfg")
c.Assert(err, check.NotNil)
}
func (s *Suite) TestBrokenHuJson(c *check.C) {
err := h.LoadACLPolicy("./tests/acls/broken.hujson")
c.Assert(err, check.NotNil)
}
func (s *Suite) TestInvalidPolicyHuson(c *check.C) {
err := h.LoadACLPolicy("./tests/acls/invalid.hujson")
c.Assert(err, check.NotNil)
c.Assert(err, check.Equals, errorEmptyPolicy)
}
func (s *Suite) TestParseHosts(c *check.C) {
var hs Hosts
err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100","example-host-2": "100.100.101.100/24"}`))
c.Assert(hs, check.NotNil)
c.Assert(err, check.IsNil)
}
func (s *Suite) TestParseInvalidCIDR(c *check.C) {
var hs Hosts
err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100/42"}`))
c.Assert(hs, check.IsNil)
c.Assert(err, check.NotNil)
}
func (s *Suite) TestRuleInvalidGeneration(c *check.C) {
err := h.LoadACLPolicy("./tests/acls/acl_policy_invalid.hujson")
c.Assert(err, check.NotNil)
}
func (s *Suite) TestBasicRule(c *check.C) {
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
c.Assert(err, check.IsNil)
rules, err := h.generateACLRules()
c.Assert(err, check.IsNil)
c.Assert(rules, check.NotNil)
}
func (s *Suite) TestPortRange(c *check.C) {
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
c.Assert(err, check.IsNil)
rules, err := h.generateACLRules()
c.Assert(err, check.IsNil)
c.Assert(rules, check.NotNil)
c.Assert(*rules, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(5400))
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
}
func (s *Suite) TestPortWildcard(c *check.C) {
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
c.Assert(err, check.IsNil)
rules, err := h.generateACLRules()
c.Assert(err, check.IsNil)
c.Assert(rules, check.NotNil)
c.Assert(*rules, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
c.Assert((*rules)[0].SrcIPs[0], check.Equals, "*")
}
func (s *Suite) TestPortNamespace(c *check.C) {
n, err := h.CreateNamespace("testnamespace")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine("testnamespace", "testmachine")
c.Assert(err, check.NotNil)
ip, _ := h.getAvailableIP()
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: ip.String(),
AuthKeyID: uint(pak.ID),
}
h.db.Save(&m)
err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_namespace_as_user.hujson")
c.Assert(err, check.IsNil)
rules, err := h.generateACLRules()
c.Assert(err, check.IsNil)
c.Assert(rules, check.NotNil)
c.Assert(*rules, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
}
func (s *Suite) TestPortGroup(c *check.C) {
n, err := h.CreateNamespace("testnamespace")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine("testnamespace", "testmachine")
c.Assert(err, check.NotNil)
ip, _ := h.getAvailableIP()
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: ip.String(),
AuthKeyID: uint(pak.ID),
}
h.db.Save(&m)
err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
c.Assert(err, check.IsNil)
rules, err := h.generateACLRules()
c.Assert(err, check.IsNil)
c.Assert(rules, check.NotNil)
c.Assert(*rules, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
}

acls_types.go (new file)

@@ -0,0 +1,70 @@
package headscale
import (
"strings"
"github.com/tailscale/hujson"
"inet.af/netaddr"
)
// ACLPolicy represents a Tailscale ACL Policy
type ACLPolicy struct {
Groups Groups `json:"Groups"`
Hosts Hosts `json:"Hosts"`
TagOwners TagOwners `json:"TagOwners"`
ACLs []ACL `json:"ACLs"`
Tests []ACLTest `json:"Tests"`
}
// ACL is a basic rule for the ACL Policy
type ACL struct {
Action string `json:"Action"`
Users []string `json:"Users"`
Ports []string `json:"Ports"`
}
// Groups references a series of aliases in the ACL rules
type Groups map[string][]string
// Hosts are aliases for IP addresses or subnets
type Hosts map[string]netaddr.IPPrefix
// TagOwners specifies which users (namespaces?) are allowed to use certain tags
type TagOwners map[string][]string
// ACLTest is not implemented yet, but should be used to check if a certain rule is allowed
type ACLTest struct {
User string `json:"User"`
Allow []string `json:"Allow"`
Deny []string `json:"Deny,omitempty"`
}
// UnmarshalJSON parses the Hosts map directly into netaddr objects
func (h *Hosts) UnmarshalJSON(data []byte) error {
hosts := Hosts{}
hs := make(map[string]string)
err := hujson.Unmarshal(data, &hs)
if err != nil {
return err
}
for k, v := range hs {
if !strings.Contains(v, "/") {
v = v + "/32"
}
prefix, err := netaddr.ParseIPPrefix(v)
if err != nil {
return err
}
hosts[k] = prefix
}
*h = hosts
return nil
}
// IsZero is perhaps a bit naive here
func (p ACLPolicy) IsZero() bool {
if len(p.Groups) == 0 && len(p.Hosts) == 0 && len(p.ACLs) == 0 {
return true
}
return false
}

api.go

@@ -3,19 +3,19 @@ package headscale
import (
"encoding/binary"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"time"
"github.com/rs/zerolog/log"
"github.com/gin-gonic/gin"
"github.com/jinzhu/gorm"
"github.com/klauspost/compress/zstd"
"gorm.io/datatypes"
"inet.af/netaddr"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/wgengine/wgcfg"
"tailscale.com/types/wgkey"
)
// KeyHandler provides the Headscale pub key
@@ -33,8 +33,6 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
return
}
// spew.Dump(c.Params)
c.Data(http.StatusOK, "text/html; charset=utf-8", []byte(fmt.Sprintf(`
<html>
<body>
@@ -45,7 +43,7 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
<p>
<code>
<b>headscale -n NAMESPACE node register %s</b>
<b>headscale -n NAMESPACE nodes register %s</b>
</code>
</p>
@@ -60,60 +58,70 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
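// RegistrationHandler handles the registration process of a machine: brand
// new machines, pre-auth key logins, NodeKey refreshes and nodes that are
// waiting to be authorized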
func (h *Headscale) RegistrationHandler(c *gin.Context) {
body, _ := io.ReadAll(c.Request.Body)
mKeyStr := c.Param("id")
mKey, err := wgcfg.ParseHexKey(mKeyStr)
mKey, err := wgkey.ParseHex(mKeyStr)
if err != nil {
log.Printf("Cannot parse machine key: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot parse machine key")
c.String(http.StatusInternalServerError, "Sad!")
return
}
req := tailcfg.RegisterRequest{}
err = decode(body, &req, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot decode message: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot decode message")
c.String(http.StatusInternalServerError, "Very sad!")
return
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
c.String(http.StatusInternalServerError, ":(")
return
}
defer db.Close()
now := time.Now().UTC()
var m Machine
if db.First(&m, "machine_key = ?", mKey.HexString()).RecordNotFound() {
log.Println("New Machine!")
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
log.Info().Str("machine", req.Hostinfo.Hostname).Msg("New machine")
m = Machine{
Expiry: &req.Expiry,
MachineKey: mKey.HexString(),
Name: req.Hostinfo.Hostname,
NodeKey: wgcfg.Key(req.NodeKey).HexString(),
Expiry: &req.Expiry,
MachineKey: mKey.HexString(),
Name: req.Hostinfo.Hostname,
NodeKey: wgkey.Key(req.NodeKey).HexString(),
LastSuccessfulUpdate: &now,
}
if err := db.Create(&m).Error; err != nil {
log.Printf("Could not create row: %s", err)
if err := h.db.Create(&m).Error; err != nil {
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Could not create row")
return
}
}
if !m.Registered && req.Auth.AuthKey != "" {
h.handleAuthKey(c, db, mKey, req, m)
h.handleAuthKey(c, h.db, mKey, req, m)
return
}
resp := tailcfg.RegisterResponse{}
// We have the updated key!
if m.NodeKey == wgcfg.Key(req.NodeKey).HexString() {
if m.NodeKey == wgkey.Key(req.NodeKey).HexString() {
if m.Registered {
log.Printf("[%s] Client is registered and we have the current NodeKey. All clear to /map", m.Name)
log.Debug().
Str("handler", "Registration").
Str("machine", m.Name).
Msg("Client is registered and we have the current NodeKey. All clear to /map")
resp.AuthURL = ""
resp.MachineAuthorized = true
resp.User = *m.Namespace.toUser()
respBody, err := encode(resp, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "")
return
}
@@ -121,12 +129,18 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
return
}
log.Printf("[%s] Not registered and not NodeKey rotation. Sending a authurl to register", m.Name)
log.Debug().
Str("handler", "Registration").
Str("machine", m.Name).
Msg("Not registered and not NodeKey rotation. Sending a authurl to register")
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
h.cfg.ServerURL, mKey.HexString())
respBody, err := encode(resp, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "")
return
}
@@ -135,16 +149,22 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
}
// The NodeKey we have matches OldNodeKey, which means this is a refresh after an key expiration
if m.NodeKey == wgcfg.Key(req.OldNodeKey).HexString() {
log.Printf("[%s] We have the OldNodeKey in the database. This is a key refresh", m.Name)
m.NodeKey = wgcfg.Key(req.NodeKey).HexString()
db.Save(&m)
if m.NodeKey == wgkey.Key(req.OldNodeKey).HexString() {
log.Debug().
Str("handler", "Registration").
Str("machine", m.Name).
Msg("We have the OldNodeKey in the database. This is a key refresh")
m.NodeKey = wgkey.Key(req.NodeKey).HexString()
h.db.Save(&m)
resp.AuthURL = ""
resp.User = *m.Namespace.toUser()
respBody, err := encode(resp, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "Extremely sad!")
return
}
@@ -155,234 +175,97 @@ func (h *Headscale) RegistrationHandler(c *gin.Context) {
// We arrive here after a client is restarted without finalizing the authentication flow or
// when headscale is stopped in the middle of the auth process.
if m.Registered {
log.Printf("[%s] The node is sending us a new NodeKey, but machine is registered. All clear for /map", m.Name)
log.Debug().
Str("handler", "Registration").
Str("machine", m.Name).
Msg("The node is sending us a new NodeKey, but machine is registered. All clear for /map")
resp.AuthURL = ""
resp.MachineAuthorized = true
resp.User = *m.Namespace.toUser()
respBody, err := encode(resp, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "")
return
}
c.Data(200, "application/json; charset=utf-8", respBody)
return
}
log.Printf("[%s] The node is sending us a new NodeKey, sending auth url", m.Name)
log.Debug().
Str("handler", "Registration").
Str("machine", m.Name).
Msg("The node is sending us a new NodeKey, sending auth url")
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
h.cfg.ServerURL, mKey.HexString())
respBody, err := encode(resp, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("handler", "Registration").
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "")
return
}
c.Data(200, "application/json; charset=utf-8", respBody)
}
// PollNetMapHandler takes care of /machine/:id/map
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At this moment the updates are sent in a quite horrendous way, but they kinda work.
func (h *Headscale) PollNetMapHandler(c *gin.Context) {
body, _ := io.ReadAll(c.Request.Body)
mKeyStr := c.Param("id")
mKey, err := wgcfg.ParseHexKey(mKeyStr)
func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
log.Trace().
Str("func", "getMapResponse").
Str("machine", req.Hostinfo.Hostname).
Msg("Creating Map response")
node, err := m.toNode(true)
if err != nil {
log.Printf("Cannot parse client key: %s", err)
return
}
req := tailcfg.MapRequest{}
err = decode(body, &req, &mKey, h.privateKey)
if err != nil {
log.Printf("Cannot decode message: %s", err)
return
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return
}
defer db.Close()
var m Machine
if db.First(&m, "machine_key = ?", mKey.HexString()).RecordNotFound() {
log.Printf("Ignoring request, cannot find machine with key %s", mKey.HexString())
return
}
hostinfo, _ := json.Marshal(req.Hostinfo)
m.Name = req.Hostinfo.Hostname
m.HostInfo = datatypes.JSON(hostinfo)
m.DiscoKey = wgcfg.Key(req.DiscoKey).HexString()
now := time.Now().UTC()
// From Tailscale client:
//
// ReadOnly is whether the client just wants to fetch the MapResponse,
// without updating their Endpoints. The Endpoints field will be ignored and
// LastSeen will not be updated and peers will not be notified of changes.
//
// The intended use is for clients to discover the DERP map at start-up
// before their first real endpoint update.
if !req.ReadOnly {
endpoints, _ := json.Marshal(req.Endpoints)
m.Endpoints = datatypes.JSON(endpoints)
m.LastSeen = &now
}
db.Save(&m)
pollData := make(chan []byte, 1)
update := make(chan []byte, 1)
cancelKeepAlive := make(chan []byte, 1)
defer close(pollData)
defer close(cancelKeepAlive)
h.pollMu.Lock()
h.clientsPolling[m.ID] = update
h.pollMu.Unlock()
data, err := h.getMapResponse(mKey, req, m)
if err != nil {
c.String(http.StatusInternalServerError, ":(")
return
}
// We update our peers if the client is not sending ReadOnly in the MapRequest
// so we don't distribute its initial request (it comes with
// empty endpoints to peers)
// Details on the protocol can be found in https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L696
log.Printf("[%s] ReadOnly=%t OmitPeers=%t Stream=%t", m.Name, req.ReadOnly, req.OmitPeers, req.Stream)
if req.ReadOnly {
log.Printf("[%s] Client is starting up. Asking for DERP map", m.Name)
c.Data(200, "application/json; charset=utf-8", *data)
return
}
if req.OmitPeers && !req.Stream {
log.Printf("[%s] Client sent endpoint update and is ok with a response without peer list", m.Name)
c.Data(200, "application/json; charset=utf-8", *data)
return
} else if req.OmitPeers && req.Stream {
log.Printf("[%s] Warning, ignoring request, don't know how to handle it", m.Name)
c.String(http.StatusBadRequest, "")
return
}
log.Printf("[%s] Client is ready to access the tailnet", m.Name)
log.Printf("[%s] Sending initial map", m.Name)
pollData <- *data
log.Printf("[%s] Notifying peers", m.Name)
peers, _ := h.getPeers(m)
h.pollMu.Lock()
for _, p := range *peers {
pUp, ok := h.clientsPolling[uint64(p.ID)]
if ok {
log.Printf("[%s] Notifying peer %s (%s)", m.Name, p.Name, p.Addresses[0])
pUp <- []byte{}
} else {
log.Printf("[%s] Peer %s does not appear to be polling", m.Name, p.Name)
}
}
h.pollMu.Unlock()
go h.keepAlive(cancelKeepAlive, pollData, mKey, req, m)
c.Stream(func(w io.Writer) bool {
select {
case data := <-pollData:
log.Printf("[%s] Sending data (%d bytes)", m.Name, len(data))
_, err := w.Write(data)
if err != nil {
log.Printf("[%s] 🤮 Cannot write data: %s", m.Name, err)
}
now := time.Now().UTC()
m.LastSeen = &now
db.Save(&m)
return true
case <-update:
log.Printf("[%s] Received a request for update", m.Name)
data, err := h.getMapResponse(mKey, req, m)
if err != nil {
log.Printf("[%s] Could not get the map update: %s", m.Name, err)
}
_, err = w.Write(*data)
if err != nil {
log.Printf("[%s] Could not write the map response: %s", m.Name, err)
}
return true
case <-c.Request.Context().Done():
log.Printf("[%s] The client has closed the connection", m.Name)
now := time.Now().UTC()
m.LastSeen = &now
db.Save(&m)
h.pollMu.Lock()
cancelKeepAlive <- []byte{}
delete(h.clientsPolling, m.ID)
close(update)
h.pollMu.Unlock()
return false
}
})
}
func (h *Headscale) keepAlive(cancel chan []byte, pollData chan []byte, mKey wgcfg.Key, req tailcfg.MapRequest, m Machine) {
for {
select {
case <-cancel:
return
default:
h.pollMu.Lock()
data, err := h.getMapKeepAliveResponse(mKey, req, m)
if err != nil {
log.Printf("Error generating the keep alive msg: %s", err)
return
}
log.Printf("[%s] Sending keepalive", m.Name)
pollData <- *data
h.pollMu.Unlock()
time.Sleep(60 * time.Second)
}
}
}
func (h *Headscale) getMapResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
node, err := m.toNode()
if err != nil {
log.Printf("Cannot convert to node: %s", err)
log.Error().
Str("func", "getMapResponse").
Err(err).
Msg("Cannot convert to node")
return nil, err
}
peers, err := h.getPeers(m)
if err != nil {
log.Printf("Cannot fetch peers: %s", err)
log.Error().
Str("func", "getMapResponse").
Err(err).
Msg("Cannot fetch peers")
return nil, err
}
resp := tailcfg.MapResponse{
KeepAlive: false,
Node: node,
Peers: *peers,
DNS: []netaddr.IP{},
SearchPaths: []string{},
Domain: "foobar@example.com",
PacketFilter: tailcfg.FilterAllowAll,
DERPMap: h.cfg.DerpMap,
UserProfiles: []tailcfg.UserProfile{},
Roles: []tailcfg.Role{},
profile := tailcfg.UserProfile{
ID: tailcfg.UserID(m.NamespaceID),
LoginName: m.Namespace.Name,
DisplayName: m.Namespace.Name,
}
resp := tailcfg.MapResponse{
KeepAlive: false,
Node: node,
Peers: *peers,
//TODO(kradalby): As per tailscale docs, if DNSConfig is nil,
// it means its not updated, maybe we can have some logic
// to check and only pass updates when its updates.
// This is probably more relevant if we try to implement
// "MagicDNS"
DNSConfig: h.cfg.DNSConfig,
SearchPaths: []string{},
Domain: "headscale.net",
PacketFilter: *h.aclRules,
DERPMap: h.cfg.DerpMap,
UserProfiles: []tailcfg.UserProfile{profile},
}
log.Trace().
Str("func", "getMapResponse").
Str("machine", req.Hostinfo.Hostname).
Msgf("Generated map response: %s", tailMapResponseToString(resp))
var respBody []byte
if req.Compress == "zstd" {
src, _ := json.Marshal(resp)
encoder, _ := zstd.NewWriter(nil)
srcCompressed := encoder.EncodeAll(src, nil)
respBody, err = encodeMsg(srcCompressed, &mKey, h.privateKey)
@@ -395,7 +278,6 @@ func (h *Headscale) getMapResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Mac
return nil, err
}
}
// spew.Dump(resp)
// declare the incoming size on the first 4 bytes
data := make([]byte, 4)
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
@@ -403,7 +285,7 @@ func (h *Headscale) getMapResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Mac
return &data, nil
}
func (h *Headscale) getMapKeepAliveResponse(mKey wgcfg.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
func (h *Headscale) getMapKeepAliveResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
resp := tailcfg.MapResponse{
KeepAlive: true,
}
@@ -429,31 +311,55 @@ func (h *Headscale) getMapKeepAliveResponse(mKey wgcfg.Key, req tailcfg.MapReque
return &data, nil
}
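// handleAuthKey processes a registration request that carries a pre-auth
// key: it validates the key, assigns an available IP address and marks the
// machine as registered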
func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgcfg.Key, req tailcfg.RegisterRequest, m Machine) {
func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgkey.Key, req tailcfg.RegisterRequest, m Machine) {
log.Debug().
Str("func", "handleAuthKey").
Str("machine", req.Hostinfo.Hostname).
Msgf("Processing auth key for %s", req.Hostinfo.Hostname)
resp := tailcfg.RegisterResponse{}
pak, err := h.checkKeyValidity(req.Auth.AuthKey)
if err != nil {
resp.MachineAuthorized = false
respBody, err := encode(resp, &idKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "")
return
}
c.Data(200, "application/json; charset=utf-8", respBody)
log.Printf("[%s] Failed authentication via AuthKey", m.Name)
log.Error().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Msg("Failed authentication via AuthKey")
return
}
log.Debug().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Msg("Authentication key was valid, proceeding to acquire an IP address")
ip, err := h.getAvailableIP()
if err != nil {
log.Println(err)
log.Error().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Msg("Failed to find an available IP")
return
}
log.Info().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Str("ip", ip.String()).
Msgf("Assigning %s to %s", ip, m.Name)
m.AuthKeyID = uint(pak.ID)
m.IPAddress = ip.String()
m.NamespaceID = pak.NamespaceID
m.NodeKey = wgcfg.Key(req.NodeKey).HexString() // we update it just in case
m.NodeKey = wgkey.Key(req.NodeKey).HexString() // we update it just in case
m.Registered = true
m.RegisterMethod = "authKey"
db.Save(&m)
@@ -462,10 +368,18 @@ func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgcfg.Key,
resp.User = *pak.Namespace.toUser()
respBody, err := encode(resp, &idKey, h.privateKey)
if err != nil {
log.Printf("Cannot encode message: %s", err)
log.Error().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Err(err).
Msg("Cannot encode message")
c.String(http.StatusInternalServerError, "Extremely sad!")
return
}
c.Data(200, "application/json; charset=utf-8", respBody)
log.Printf("[%s] Successfully authenticated via AuthKey", m.Name)
log.Info().
Str("func", "handleAuthKey").
Str("machine", m.Name).
Str("ip", ip.String()).
Msg("Successfully authenticated via AuthKey")
}

app.go

@@ -3,17 +3,20 @@ package headscale
import (
"errors"
"fmt"
"log"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/rs/zerolog/log"
"github.com/gin-gonic/gin"
"golang.org/x/crypto/acme/autocert"
"gorm.io/gorm"
"inet.af/netaddr"
"tailscale.com/tailcfg"
"tailscale.com/wgengine/wgcfg"
"tailscale.com/types/wgkey"
)
// Config contains the initial Headscale configuration
@@ -23,6 +26,7 @@ type Config struct {
PrivateKeyPath string
DerpMap *tailcfg.DERPMap
EphemeralNodeInactivityTimeout time.Duration
IPPrefix netaddr.IPPrefix
DBtype string
DBpath string
@@ -32,25 +36,34 @@ type Config struct {
DBuser string
DBpass string
TLSLetsEncryptListen string
TLSLetsEncryptHostname string
TLSLetsEncryptCacheDir string
TLSLetsEncryptChallengeType string
TLSCertPath string
TLSKeyPath string
DNSConfig *tailcfg.DNSConfig
}
// Headscale represents the base app of the service
type Headscale struct {
cfg Config
db *gorm.DB
dbString string
dbType string
dbDebug bool
publicKey *wgcfg.Key
privateKey *wgcfg.PrivateKey
publicKey *wgkey.Key
privateKey *wgkey.Private
pollMu sync.Mutex
clientsPolling map[uint64]chan []byte // this is by all means a hackity hack
aclPolicy *ACLPolicy
aclRules *[]tailcfg.FilterRule
clientsUpdateChannels sync.Map
clientsUpdateChannelMutex sync.Mutex
lastStateChange sync.Map
}
// NewHeadscale returns the Headscale app
@@ -59,7 +72,7 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
if err != nil {
return nil, err
}
privKey, err := wgcfg.ParsePrivateKey(string(content))
privKey, err := wgkey.ParsePrivate(string(content))
if err != nil {
return nil, err
}
@@ -73,7 +86,7 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
case "sqlite3":
dbString = cfg.DBpath
default:
return nil, errors.New("Unsupported DB")
return nil, errors.New("unsupported DB")
}
h := Headscale{
@@ -82,12 +95,14 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
dbString: dbString,
privateKey: privKey,
publicKey: &pubKey,
aclRules: &tailcfg.FilterAllowAll, // default allowall
}
err = h.initDB()
if err != nil {
return nil, err
}
h.clientsPolling = make(map[uint64]chan []byte)
return &h, nil
}
@@ -97,9 +112,9 @@ func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
http.Redirect(w, req, target, http.StatusFound)
}
// ExpireEphemeralNodes deletes ephemeral machine records that have not been
// expireEphemeralNodes deletes ephemeral machine records that have not been
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout
func (h *Headscale) ExpireEphemeralNodes(milliSeconds int64) {
func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
h.expireEphemeralNodesWorker()
@@ -107,47 +122,71 @@ func (h *Headscale) ExpireEphemeralNodes(milliSeconds int64) {
}
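// expireEphemeralNodesWorker walks every namespace and deletes ephemeral
// machines whose LastSeen is older than the configured inactivity timeout,
// notifying their peers of the change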
func (h *Headscale) expireEphemeralNodesWorker() {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return
}
defer db.Close()
namespaces, err := h.ListNamespaces()
if err != nil {
log.Printf("Error listing namespaces: %s", err)
log.Error().Err(err).Msg("Error listing namespaces")
return
}
for _, ns := range *namespaces {
machines, err := h.ListMachinesInNamespace(ns.Name)
if err != nil {
log.Printf("Error listing machines in namespace %s: %s", ns.Name, err)
log.Error().Err(err).Str("namespace", ns.Name).Msg("Error listing machines in namespace")
return
}
for _, m := range *machines {
if m.AuthKey != nil && m.LastSeen != nil && m.AuthKey.Ephemeral && time.Now().After(m.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
log.Printf("[%s] Ephemeral client removed from database\n", m.Name)
err = db.Unscoped().Delete(m).Error
log.Info().Str("machine", m.Name).Msg("Ephemeral client removed from database")
err = h.db.Unscoped().Delete(m).Error
if err != nil {
log.Printf("[%s] 🤮 Cannot delete ephemeral machine from the database: %s", m.Name, err)
log.Error().Err(err).Str("machine", m.Name).Msg("🤮 Cannot delete ephemeral machine from the database")
}
h.notifyChangesToPeers(&m)
}
}
}
}
// watchForKVUpdates checks the KV DB table for requests to perform tailnet updates
// This is a way for the CLI to communicate with the headscale server
func (h *Headscale) watchForKVUpdates(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
h.watchForKVUpdatesWorker()
}
}
func (h *Headscale) watchForKVUpdatesWorker() {
h.checkForNamespacesPendingUpdates()
// more functions will come here in the future
}
// Serve launches a GIN server with the Headscale API
func (h *Headscale) Serve() error {
r := gin.Default()
r.GET("/health", func(c *gin.Context) { c.JSON(200, gin.H{"healthy": "ok"}) })
r.GET("/key", h.KeyHandler)
r.GET("/register", h.RegisterWebAPI)
r.POST("/machine/:id/map", h.PollNetMapHandler)
r.POST("/machine/:id", h.RegistrationHandler)
r.GET("/apple", h.AppleMobileConfig)
r.GET("/apple/:platform", h.ApplePlatformConfig)
var err error
timeout := 30 * time.Second
go h.watchForKVUpdates(5000)
go h.expireEphemeralNodes(5000)
s := &http.Server{
Addr: h.cfg.Addr,
Handler: r,
ReadTimeout: timeout,
WriteTimeout: timeout,
}
if h.cfg.TLSLetsEncryptHostname != "" {
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
log.Println("WARNING: listening with TLS but ServerURL does not start with https://")
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
}
m := autocert.Manager{
@@ -156,36 +195,58 @@ func (h *Headscale) Serve() error {
Cache: autocert.DirCache(h.cfg.TLSLetsEncryptCacheDir),
}
s := &http.Server{
Addr: h.cfg.Addr,
TLSConfig: m.TLSConfig(),
Handler: r,
Addr: h.cfg.Addr,
TLSConfig: m.TLSConfig(),
Handler: r,
ReadTimeout: timeout,
WriteTimeout: timeout,
}
if h.cfg.TLSLetsEncryptChallengeType == "TLS-ALPN-01" {
// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
// The RFC requires that the validation is done on port 443; in other words, headscale
// must be configured to run on port 443.
// must be reachable on port 443.
err = s.ListenAndServeTLS("", "")
} else if h.cfg.TLSLetsEncryptChallengeType == "HTTP-01" {
// Configuration via autocert with HTTP-01. This requires listening on
// port 80 for the certificate validation in addition to the headscale
// service, which can be configured to run on any other port.
go func() {
log.Fatal(http.ListenAndServe(":http", m.HTTPHandler(http.HandlerFunc(h.redirect))))
log.Fatal().
Err(http.ListenAndServe(h.cfg.TLSLetsEncryptListen, m.HTTPHandler(http.HandlerFunc(h.redirect)))).
Msg("failed to set up a HTTP server")
}()
err = s.ListenAndServeTLS("", "")
} else {
return errors.New("Unknown value for TLSLetsEncryptChallengeType")
return errors.New("unknown value for TLSLetsEncryptChallengeType")
}
} else if h.cfg.TLSCertPath == "" {
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
log.Println("WARNING: listening without TLS but ServerURL does not start with http://")
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
}
err = r.Run(h.cfg.Addr)
err = s.ListenAndServe()
} else {
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
log.Println("WARNING: listening with TLS but ServerURL does not start with https://")
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
}
err = r.RunTLS(h.cfg.Addr, h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
err = s.ListenAndServeTLS(h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
}
return err
}
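// setLastStateChangeToNow records the current time as the last state change
// for the given namespace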
func (h *Headscale) setLastStateChangeToNow(namespace string) {
now := time.Now().UTC()
h.lastStateChange.Store(namespace, now)
}
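// getLastStateChange returns the time of the last recorded state change for
// a namespace, initializing it to now when nothing has been recorded yet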
func (h *Headscale) getLastStateChange(namespace string) time.Time {
if wrapped, ok := h.lastStateChange.Load(namespace); ok {
lastChange, _ := wrapped.(time.Time)
return lastChange
}
now := time.Now().UTC()
h.lastStateChange.Store(namespace, now)
return now
}


@@ -5,9 +5,8 @@ import (
"os"
"testing"
_ "github.com/jinzhu/gorm/dialects/sqlite" // sql driver
"gopkg.in/check.v1"
"inet.af/netaddr"
)
func Test(t *testing.T) {
@@ -38,7 +37,9 @@ func (s *Suite) ResetDB(c *check.C) {
if err != nil {
c.Fatal(err)
}
cfg := Config{}
cfg := Config{
IPPrefix: netaddr.MustParseIPPrefix("10.27.0.0/23"),
}
h = Headscale{
cfg: cfg,
@@ -49,4 +50,9 @@ func (s *Suite) ResetDB(c *check.C) {
if err != nil {
c.Fatal(err)
}
db, err := h.openDB()
if err != nil {
c.Fatal(err)
}
h.db = db
}

226
apple_mobileconfig.go Normal file
View File

@@ -0,0 +1,226 @@
package headscale
import (
"bytes"
"net/http"
"text/template"
"github.com/rs/zerolog/log"
"github.com/gin-gonic/gin"
"github.com/gofrs/uuid"
)
// AppleMobileConfig serves an HTML page with instructions and links for installing the Apple configuration profiles
// Listens on /apple
func (h *Headscale) AppleMobileConfig(c *gin.Context) {
t := template.Must(template.New("apple").Parse(`
<html>
<body>
<h1>Apple configuration profiles</h1>
<p>
This page provides <a href="https://support.apple.com/guide/mdm/mdm-overview-mdmbf9e668/web">configuration profiles</a> for the official Tailscale clients for <a href="https://apps.apple.com/us/app/tailscale/id1470499037?ls=1">iOS</a> and <a href="https://apps.apple.com/ca/app/tailscale/id1475387142?mt=12">macOS</a>.
</p>
<p>
The profiles will configure Tailscale.app to use {{.Url}} as its control server.
</p>
<h3>Caution</h3>
<p>You should always inspect the profile before installing it:</p>
<!--
<p><code>curl {{.Url}}/apple/ios</code></p>
-->
<p><code>curl {{.Url}}/apple/macos</code></p>
<h2>Profiles</h2>
<!--
<h3>iOS</h3>
<p>
<a href="/apple/ios" download="headscale_ios.mobileconfig">iOS profile</a>
</p>
-->
<h3>macOS</h3>
<p>Headscale can be set as the default server by installing a Headscale configuration profile:</p>
<p>
<a href="/apple/macos" download="headscale_macos.mobileconfig">macOS profile</a>
</p>
<ol>
<li>Download the profile, then open it. When it has been opened, there should be a notification that a profile can be installed</li>
<li>Open System Preferences and go to "Profiles"</li>
<li>Find and install the Headscale profile</li>
<li>Restart Tailscale.app and log in</li>
</ol>
<p>Or</p>
<p>Use your terminal to configure the default setting for Tailscale by issuing:</p>
<code>defaults write io.tailscale.ipn.macos ControlURL {{.Url}}</code>
<p>Restart Tailscale.app and log in.</p>
</body>
</html>`))
config := map[string]interface{}{
"Url": h.cfg.ServerURL,
}
var payload bytes.Buffer
if err := t.Execute(&payload, config); err != nil {
log.Error().
Str("handler", "AppleMobileConfig").
Err(err).
Msg("Could not render Apple index template")
c.Data(http.StatusInternalServerError, "text/html; charset=utf-8", []byte("Could not render Apple index template"))
return
}
c.Data(http.StatusOK, "text/html; charset=utf-8", payload.Bytes())
}
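// ApplePlatformConfig generates and serves a .mobileconfig profile for the
// requested platform (macos or ios), pointing the Tailscale client at this
// Headscale instance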
func (h *Headscale) ApplePlatformConfig(c *gin.Context) {
platform := c.Param("platform")
id, err := uuid.NewV4()
if err != nil {
log.Error().
Str("handler", "ApplePlatformConfig").
Err(err).
Msg("Failed not create UUID")
c.Data(http.StatusInternalServerError, "text/html; charset=utf-8", []byte("Failed to create UUID"))
return
}
contentId, err := uuid.NewV4()
if err != nil {
log.Error().
Str("handler", "ApplePlatformConfig").
Err(err).
Msg("Failed not create UUID")
c.Data(http.StatusInternalServerError, "text/html; charset=utf-8", []byte("Failed to create UUID"))
return
}
platformConfig := AppleMobilePlatformConfig{
UUID: contentId,
Url: h.cfg.ServerURL,
}
var payload bytes.Buffer
switch platform {
case "macos":
if err := macosTemplate.Execute(&payload, platformConfig); err != nil {
log.Error().
Str("handler", "ApplePlatformConfig").
Err(err).
Msg("Could not render Apple macOS template")
c.Data(http.StatusInternalServerError, "text/html; charset=utf-8", []byte("Could not render Apple macOS template"))
return
}
case "ios":
if err := iosTemplate.Execute(&payload, platformConfig); err != nil {
log.Error().
Str("handler", "ApplePlatformConfig").
Err(err).
Msg("Could not render Apple iOS template")
c.Data(http.StatusInternalServerError, "text/html; charset=utf-8", []byte("Could not render Apple iOS template"))
return
}
default:
c.Data(http.StatusOK, "text/html; charset=utf-8", []byte("Invalid platform, only ios and macos are supported"))
return
}
config := AppleMobileConfig{
UUID: id,
Url: h.cfg.ServerURL,
Payload: payload.String(),
}
var content bytes.Buffer
if err := commonTemplate.Execute(&content, config); err != nil {
log.Error().
Str("handler", "ApplePlatformConfig").
Err(err).
Msg("Could not render Apple platform template")
c.Data(http.StatusInternalServerError, "text/html; charset=utf-8", []byte("Could not render Apple platform template"))
return
}
c.Data(http.StatusOK, "application/x-apple-aspen-config; charset=utf-8", content.Bytes())
}
type AppleMobileConfig struct {
UUID uuid.UUID
Url string
Payload string
}
type AppleMobilePlatformConfig struct {
UUID uuid.UUID
Url string
}
var commonTemplate = template.Must(template.New("mobileconfig").Parse(`<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>PayloadUUID</key>
<string>{{.UUID}}</string>
<key>PayloadDisplayName</key>
<string>Headscale</string>
<key>PayloadDescription</key>
<string>Configure Tailscale login server to: {{.Url}}</string>
<key>PayloadIdentifier</key>
<string>com.github.juanfont.headscale</string>
<key>PayloadRemovalDisallowed</key>
<false/>
<key>PayloadType</key>
<string>Configuration</string>
<key>PayloadVersion</key>
<integer>1</integer>
<key>PayloadContent</key>
<array>
{{.Payload}}
</array>
</dict>
</plist>`))
var iosTemplate = template.Must(template.New("iosTemplate").Parse(`
<dict>
<key>PayloadType</key>
<string>io.tailscale.ipn.ios</string>
<key>PayloadUUID</key>
<string>{{.UUID}}</string>
<key>PayloadIdentifier</key>
<string>com.github.juanfont.headscale</string>
<key>PayloadVersion</key>
<integer>1</integer>
<key>PayloadEnabled</key>
<true/>
<key>ControlURL</key>
<string>{{.Url}}</string>
</dict>
`))
var macosTemplate = template.Must(template.New("macosTemplate").Parse(`
<dict>
<key>PayloadType</key>
<string>io.tailscale.ipn.macos</string>
<key>PayloadUUID</key>
<string>{{.UUID}}</string>
<key>PayloadIdentifier</key>
<string>com.github.juanfont.headscale</string>
<key>PayloadVersion</key>
<integer>1</integer>
<key>PayloadEnabled</key>
<true/>
<key>ControlURL</key>
<string>{{.Url}}</string>
</dict>
`))

17
cli.go
View File

@@ -2,9 +2,9 @@ package headscale
import (
"errors"
"log"
"tailscale.com/wgengine/wgcfg"
"gorm.io/gorm"
"tailscale.com/types/wgkey"
)
// RegisterMachine is executed from the CLI to register a new Machine using its MachineKey
@@ -13,18 +13,13 @@ func (h *Headscale) RegisterMachine(key string, namespace string) (*Machine, err
if err != nil {
return nil, err
}
mKey, err := wgcfg.ParseHexKey(key)
mKey, err := wgkey.ParseHex(key)
if err != nil {
return nil, err
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
m := Machine{}
if db.First(&m, "machine_key = ?", mKey.HexString()).RecordNotFound() {
if result := h.db.First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, errors.New("Machine not found")
}
@@ -40,6 +35,6 @@ func (h *Headscale) RegisterMachine(key string, namespace string) (*Machine, err
m.NamespaceID = ns.ID
m.Registered = true
m.RegisterMethod = "cli"
db.Save(&m)
h.db.Save(&m)
return &m, nil
}


@@ -8,12 +8,6 @@ func (s *Suite) TestRegisterMachine(c *check.C) {
n, err := h.CreateNamespace("test")
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
m := Machine{
ID: 0,
MachineKey: "8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e",
@@ -21,8 +15,9 @@ func (s *Suite) TestRegisterMachine(c *check.C) {
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
IPAddress: "10.0.0.1",
}
db.Save(&m)
h.db.Save(&m)
_, err = h.GetMachine("test", "testmachine")
c.Assert(err, check.IsNil)


@@ -3,17 +3,26 @@ package cli
import (
"fmt"
"log"
"strconv"
"strings"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
)
var NamespaceCmd = &cobra.Command{
Use: "namespace",
func init() {
rootCmd.AddCommand(namespaceCmd)
namespaceCmd.AddCommand(createNamespaceCmd)
namespaceCmd.AddCommand(listNamespacesCmd)
namespaceCmd.AddCommand(destroyNamespaceCmd)
}
var namespaceCmd = &cobra.Command{
Use: "namespaces",
Short: "Manage the namespaces of Headscale",
}
var CreateNamespaceCmd = &cobra.Command{
var createNamespaceCmd = &cobra.Command{
Use: "create NAME",
Short: "Creates a new namespace",
Args: func(cmd *cobra.Command, args []string) error {
@@ -41,7 +50,7 @@ var CreateNamespaceCmd = &cobra.Command{
},
}
var DestroyNamespaceCmd = &cobra.Command{
var destroyNamespaceCmd = &cobra.Command{
Use: "destroy NAME",
Short: "Destroys a namespace",
Args: func(cmd *cobra.Command, args []string) error {
@@ -69,7 +78,7 @@ var DestroyNamespaceCmd = &cobra.Command{
},
}
var ListNamespacesCmd = &cobra.Command{
var listNamespacesCmd = &cobra.Command{
Use: "list",
Short: "List all the namespaces",
Run: func(cmd *cobra.Command, args []string) {
@@ -87,9 +96,14 @@ var ListNamespacesCmd = &cobra.Command{
fmt.Println(err)
return
}
fmt.Printf("ID\tName\n")
d := pterm.TableData{{"ID", "Name", "Created"}}
for _, n := range *namespaces {
fmt.Printf("%d\t%s\n", n.ID, n.Name)
d = append(d, []string{strconv.FormatUint(uint64(n.ID), 10), n.Name, n.CreatedAt.Format("2006-01-02 15:04:05")})
}
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
if err != nil {
log.Fatal(err)
}
},
}


@@ -3,17 +3,42 @@ package cli
import (
"fmt"
"log"
"strconv"
"strings"
"time"
survey "github.com/AlecAivazis/survey/v2"
"github.com/juanfont/headscale"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"tailscale.com/tailcfg"
"tailscale.com/types/wgkey"
)
var RegisterCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(nodeCmd)
nodeCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err := nodeCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
nodeCmd.AddCommand(listNodesCmd)
nodeCmd.AddCommand(registerNodeCmd)
nodeCmd.AddCommand(deleteNodeCmd)
nodeCmd.AddCommand(shareMachineCmd)
}
var nodeCmd = &cobra.Command{
Use: "nodes",
Short: "Manage the nodes of Headscale",
}
var registerNodeCmd = &cobra.Command{
Use: "register machineID",
Short: "Registers a machine to your network",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("Missing parameters")
return fmt.Errorf("missing parameters")
}
return nil
},
@@ -41,7 +66,7 @@ var RegisterCmd = &cobra.Command{
},
}
var ListNodesCmd = &cobra.Command{
var listNodesCmd = &cobra.Command{
Use: "list",
Short: "List the nodes in a given namespace",
Run: func(cmd *cobra.Command, args []string) {
@@ -55,9 +80,26 @@ var ListNodesCmd = &cobra.Command{
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
namespace, err := h.GetNamespace(n)
if err != nil {
log.Fatalf("Error fetching namespace: %s", err)
}
machines, err := h.ListMachinesInNamespace(n)
if err != nil {
log.Fatalf("Error fetching machines: %s", err)
}
sharedMachines, err := h.ListSharedMachinesInNamespace(n)
if err != nil {
log.Fatalf("Error fetching shared machines: %s", err)
}
allMachines := append(*machines, *sharedMachines...)
if strings.HasPrefix(o, "json") {
JsonOutput(machines, err, o)
JsonOutput(allMachines, err, o)
return
}
@@ -65,19 +107,150 @@ var ListNodesCmd = &cobra.Command{
log.Fatalf("Error getting nodes: %s", err)
}
fmt.Printf("name\t\tlast seen\t\tephemeral\n")
for _, m := range *machines {
var ephemeral bool
if m.AuthKey != nil && m.AuthKey.Ephemeral {
ephemeral = true
}
fmt.Printf("%s\t%s\t%t\n", m.Name, m.LastSeen.Format("2006-01-02 15:04:05"), ephemeral)
d, err := nodesToPtables(*namespace, allMachines)
if err != nil {
log.Fatalf("Error converting to table: %s", err)
}
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
if err != nil {
log.Fatal(err)
}
},
}
var NodeCmd = &cobra.Command{
Use: "node",
Short: "Manage the nodes of Headscale",
var deleteNodeCmd = &cobra.Command{
Use: "delete ID",
Short: "Delete a node",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("missing parameters")
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
h, err := getHeadscaleApp()
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
id, err := strconv.Atoi(args[0])
if err != nil {
log.Fatalf("Error converting ID to integer: %s", err)
}
m, err := h.GetMachineByID(uint64(id))
if err != nil {
log.Fatalf("Error getting node: %s", err)
}
confirm := false
prompt := &survey.Confirm{
Message: fmt.Sprintf("Do you want to remove the node %s?", m.Name),
}
err = survey.AskOne(prompt, &confirm)
if err != nil {
return
}
if confirm {
err = h.DeleteMachine(m)
if err != nil {
log.Fatalf("Error deleting node: %s", err)
}
fmt.Printf("Node deleted\n")
} else {
fmt.Printf("Node not deleted\n")
}
},
}
var shareMachineCmd = &cobra.Command{
Use: "share ID namespace",
Short: "Shares a node from the current namespace to the specified one",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
return fmt.Errorf("missing parameters")
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
namespace, err := cmd.Flags().GetString("namespace")
if err != nil {
log.Fatalf("Error getting namespace: %s", err)
}
output, _ := cmd.Flags().GetString("output")
h, err := getHeadscaleApp()
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
_, err = h.GetNamespace(namespace)
if err != nil {
log.Fatalf("Error fetching origin namespace: %s", err)
}
destinationNamespace, err := h.GetNamespace(args[1])
if err != nil {
log.Fatalf("Error fetching destination namespace: %s", err)
}
id, err := strconv.Atoi(args[0])
if err != nil {
log.Fatalf("Error converting ID to integer: %s", err)
}
machine, err := h.GetMachineByID(uint64(id))
if err != nil {
log.Fatalf("Error getting node: %s", err)
}
err = h.AddSharedMachineToNamespace(machine, destinationNamespace)
if strings.HasPrefix(output, "json") {
JsonOutput(map[string]string{"Result": "Node shared"}, err, output)
return
}
if err != nil {
fmt.Printf("Error sharing node: %s\n", err)
return
}
fmt.Println("Node shared!")
},
}
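// nodesToPtables renders the machine list as pterm table data, colouring the
// namespace to distinguish machines owned by the current namespace from
// machines shared into it, and deriving a best-effort online flag from LastSeen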
func nodesToPtables(currentNamespace headscale.Namespace, machines []headscale.Machine) (pterm.TableData, error) {
d := pterm.TableData{{"ID", "Name", "NodeKey", "Namespace", "IP address", "Ephemeral", "Last seen", "Online"}}
for _, machine := range machines {
var ephemeral bool
if machine.AuthKey != nil && machine.AuthKey.Ephemeral {
ephemeral = true
}
var lastSeen time.Time
var lastSeenTime string
if machine.LastSeen != nil {
lastSeen = *machine.LastSeen
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
}
nKey, err := wgkey.ParseHex(machine.NodeKey)
if err != nil {
return nil, err
}
nodeKey := tailcfg.NodeKey(nKey)
var online string
if lastSeen.After(time.Now().Add(-5 * time.Minute)) { // TODO: Find a better way to reliably show if online
online = pterm.LightGreen("true")
} else {
online = pterm.LightRed("false")
}
var namespace string
if currentNamespace.ID == machine.NamespaceID {
namespace = pterm.LightMagenta(machine.Namespace.Name)
} else {
namespace = pterm.LightYellow(machine.Namespace.Name)
}
d = append(d, []string{strconv.FormatUint(machine.ID, 10), machine.Name, nodeKey.ShortString(), namespace, machine.IPAddress, strconv.FormatBool(ephemeral), lastSeenTime, online})
}
return d, nil
}


@@ -3,19 +3,36 @@ package cli
import (
"fmt"
"log"
"strconv"
"strings"
"time"
"github.com/hako/durafmt"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
)
var PreauthkeysCmd = &cobra.Command{
Use: "preauthkey",
func init() {
rootCmd.AddCommand(preauthkeysCmd)
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err := preauthkeysCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
preauthkeysCmd.AddCommand(listPreAuthKeys)
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
createPreAuthKeyCmd.PersistentFlags().Bool("reusable", false, "Make the preauthkey reusable")
createPreAuthKeyCmd.PersistentFlags().Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
createPreAuthKeyCmd.Flags().StringP("expiration", "e", "", "Human-readable expiration of the key (30m, 24h, 365d...)")
}
var preauthkeysCmd = &cobra.Command{
Use: "preauthkeys",
Short: "Handle the preauthkeys in Headscale",
}
var ListPreAuthKeys = &cobra.Command{
var listPreAuthKeys = &cobra.Command{
Use: "list",
Short: "List the preauthkeys for this namespace",
Run: func(cmd *cobra.Command, args []string) {
@@ -39,25 +56,39 @@ var ListPreAuthKeys = &cobra.Command{
fmt.Printf("Error getting the list of keys: %s\n", err)
return
}
d := pterm.TableData{{"ID", "Key", "Reusable", "Ephemeral", "Expiration", "Created"}}
for _, k := range *keys {
expiration := "-"
if k.Expiration != nil {
expiration = k.Expiration.Format("2006-01-02 15:04:05")
}
fmt.Printf(
"key: %s, namespace: %s, reusable: %v, ephemeral: %v, expiration: %s, created_at: %s\n",
var reusable string
if k.Ephemeral {
reusable = "N/A"
} else {
reusable = fmt.Sprintf("%v", k.Reusable)
}
d = append(d, []string{
strconv.FormatUint(k.ID, 10),
k.Key,
k.Namespace.Name,
k.Reusable,
k.Ephemeral,
reusable,
strconv.FormatBool(k.Ephemeral),
expiration,
k.CreatedAt.Format("2006-01-02 15:04:05"),
)
})
}
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
if err != nil {
log.Fatal(err)
}
},
}
var CreatePreAuthKeyCmd = &cobra.Command{
var createPreAuthKeyCmd = &cobra.Command{
Use: "create",
Short: "Creates a new preauthkey in the specified namespace",
Run: func(cmd *cobra.Command, args []string) {
@@ -94,6 +125,45 @@ var CreatePreAuthKeyCmd = &cobra.Command{
fmt.Println(err)
return
}
fmt.Printf("Key: %s\n", k.Key)
fmt.Printf("%s\n", k.Key)
},
}
var expirePreAuthKeyCmd = &cobra.Command{
Use: "expire",
Short: "Expire a preauthkey",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return fmt.Errorf("missing parameters")
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
n, err := cmd.Flags().GetString("namespace")
if err != nil {
log.Fatalf("Error getting namespace: %s", err)
}
o, _ := cmd.Flags().GetString("output")
h, err := getHeadscaleApp()
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
k, err := h.GetPreAuthKey(n, args[0])
if err != nil {
log.Fatalf("Error getting the key: %s", err)
}
err = h.MarkExpirePreAuthKey(k)
if strings.HasPrefix(o, "json") {
JsonOutput(k, err, o)
return
}
if err != nil {
fmt.Println(err)
return
}
fmt.Println("Expired")
},
}

cmd/headscale/cli/root.go (new file)

@@ -0,0 +1,28 @@
package cli
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
func init() {
rootCmd.PersistentFlags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json' or 'json-line'")
}
var rootCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",
Long: `
headscale is an open source implementation of the Tailscale control server
https://github.com/juanfont/headscale`,
}
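// Execute runs the root command and exits with a non-zero status on error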
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -5,15 +5,30 @@ import (
"log"
"strings"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
)
var RoutesCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(routesCmd)
routesCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err := routesCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
enableRouteCmd.Flags().BoolP("all", "a", false, "Enable all routes advertised by the node")
routesCmd.AddCommand(listRoutesCmd)
routesCmd.AddCommand(enableRouteCmd)
}
var routesCmd = &cobra.Command{
Use: "routes",
Short: "Manage the routes of Headscale",
}
var ListRoutesCmd = &cobra.Command{
var listRoutesCmd = &cobra.Command{
Use: "list NODE",
Short: "List the routes exposed by this node",
Args: func(cmd *cobra.Command, args []string) error {
@@ -33,52 +48,100 @@ var ListRoutesCmd = &cobra.Command{
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
routes, err := h.GetNodeRoutes(n, args[0])
if strings.HasPrefix(o, "json") {
JsonOutput(routes, err, o)
return
}
availableRoutes, err := h.GetAdvertisedNodeRoutes(n, args[0])
if err != nil {
fmt.Println(err)
return
}
fmt.Println(routes)
if strings.HasPrefix(o, "json") {
// TODO: Add enable/disabled information to this interface
JsonOutput(availableRoutes, err, o)
return
}
d := h.RoutesToPtables(n, args[0], *availableRoutes)
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
if err != nil {
log.Fatal(err)
}
},
}
var EnableRouteCmd = &cobra.Command{
var enableRouteCmd = &cobra.Command{
Use: "enable node-name route",
Short: "Allows exposing a route declared by this node to the rest of the nodes",
Args: func(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
return fmt.Errorf("Missing parameters")
all, err := cmd.Flags().GetBool("all")
if err != nil {
log.Fatalf("Error getting namespace: %s", err)
}
if all {
if len(args) < 1 {
return fmt.Errorf("Missing parameters")
}
return nil
} else {
if len(args) < 2 {
return fmt.Errorf("Missing parameters")
}
return nil
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
n, err := cmd.Flags().GetString("namespace")
if err != nil {
log.Fatalf("Error getting namespace: %s", err)
}
o, _ := cmd.Flags().GetString("output")
all, err := cmd.Flags().GetBool("all")
if err != nil {
log.Fatalf("Error getting namespace: %s", err)
}
h, err := getHeadscaleApp()
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
route, err := h.EnableNodeRoute(n, args[0], args[1])
if strings.HasPrefix(o, "json") {
JsonOutput(route, err, o)
return
}
if err != nil {
fmt.Println(err)
return
if all {
availableRoutes, err := h.GetAdvertisedNodeRoutes(n, args[0])
if err != nil {
fmt.Println(err)
return
}
for _, availableRoute := range *availableRoutes {
err = h.EnableNodeRoute(n, args[0], availableRoute.String())
if err != nil {
fmt.Println(err)
return
}
if strings.HasPrefix(o, "json") {
JsonOutput(availableRoute, err, o)
} else {
fmt.Printf("Enabled route %s\n", availableRoute)
}
}
} else {
err = h.EnableNodeRoute(n, args[0], args[1])
if strings.HasPrefix(o, "json") {
JsonOutput(args[1], err, o)
return
}
if err != nil {
fmt.Println(err)
return
}
fmt.Printf("Enabled route %s\n", args[1])
}
fmt.Printf("Enabled route %s\n", route)
},
}


@@ -6,7 +6,11 @@ import (
"github.com/spf13/cobra"
)
var ServeCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(serveCmd)
}
var serveCmd = &cobra.Command{
Use: "serve",
Short: "Launches the headscale server",
Args: func(cmd *cobra.Command, args []string) error {
@@ -17,7 +21,7 @@ var ServeCmd = &cobra.Command{
if err != nil {
log.Fatalf("Error initializing: %s", err)
}
go h.ExpireEphemeralNodes(5000)
err = h.Serve()
if err != nil {
log.Fatalf("Error initializing: %s", err)


@@ -5,16 +5,18 @@ import (
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"strings"
"time"
"github.com/juanfont/headscale"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
"inet.af/netaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
)
type ErrorOutput struct {
@@ -36,6 +38,12 @@ func LoadConfig(path string) error {
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
viper.SetDefault("tls_letsencrypt_challenge_type", "HTTP-01")
viper.SetDefault("ip_prefix", "100.64.0.0/10")
viper.SetDefault("log_level", "info")
viper.SetDefault("dns_config", nil)
err := viper.ReadInConfig()
if err != nil {
return fmt.Errorf("Fatal error reading config file: %s \n", err)
@@ -48,7 +56,9 @@ func LoadConfig(path string) error {
}
if (viper.GetString("tls_letsencrypt_hostname") != "") && (viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") && (!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
errorText += "Fatal config error: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, listen_addr must end in :443\n"
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
log.Warn().
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
}
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") && (viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
@@ -63,6 +73,45 @@ func LoadConfig(path string) error {
} else {
return nil
}
}
func GetDNSConfig() *tailcfg.DNSConfig {
if viper.IsSet("dns_config") {
dnsConfig := &tailcfg.DNSConfig{}
if viper.IsSet("dns_config.nameservers") {
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
nameservers := make([]netaddr.IP, len(nameserversStr))
resolvers := make([]dnstype.Resolver, len(nameserversStr))
for index, nameserverStr := range nameserversStr {
nameserver, err := netaddr.ParseIP(nameserverStr)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse nameserver IP: %s", nameserverStr)
}
nameservers[index] = nameserver
resolvers[index] = dnstype.Resolver{
Addr: nameserver.String(),
}
}
dnsConfig.Nameservers = nameservers
dnsConfig.Resolvers = resolvers
}
if viper.IsSet("dns_config.domains") {
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
}
return dnsConfig
}
return nil
}
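
GetDNSConfig only fills in the fields that are actually present in the configuration. A sketch of how it behaves when viper is fed the same keys that config.json would provide (hypothetical snippet, not part of the diff):

```
package cli

import (
	"fmt"

	"github.com/spf13/viper"
)

// exampleDNSConfig is a hypothetical snippet showing what GetDNSConfig
// builds from the dns_config keys.
func exampleDNSConfig() {
	viper.Set("dns_config.nameservers", []string{"1.1.1.1"})
	viper.Set("dns_config.domains", []string{"example.com"})

	cfg := GetDNSConfig()
	// Each nameserver is parsed into a netaddr.IP and mirrored into a
	// dnstype.Resolver with Addr set to the same address.
	fmt.Println(cfg.Nameservers[0], cfg.Resolvers[0].Addr, cfg.Domains[0])
	// -> 1.1.1.1 1.1.1.1 example.com
}
```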
func absPath(path string) string {
@@ -78,9 +127,13 @@ func absPath(path string) string {
}
func getHeadscaleApp() (*headscale.Headscale, error) {
derpMap, err := loadDerpMap(absPath(viper.GetString("derp_map_path")))
derpPath := absPath(viper.GetString("derp_map_path"))
derpMap, err := loadDerpMap(derpPath)
if err != nil {
log.Printf("Could not load DERP servers map file: %s", err)
log.Error().
Str("path", derpPath).
Err(err).
Msg("Could not load DERP servers map file")
}
// Minimum inactivity timeout is the keepalive timeout (60s) plus a few seconds
@@ -96,6 +149,7 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
Addr: viper.GetString("listen_addr"),
PrivateKeyPath: absPath(viper.GetString("private_key_path")),
DerpMap: derpMap,
IPPrefix: netaddr.MustParseIPPrefix(viper.GetString("ip_prefix")),
EphemeralNodeInactivityTimeout: viper.GetDuration("ephemeral_node_inactivity_timeout"),
@@ -108,17 +162,34 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
DBpass: viper.GetString("db_pass"),
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
TLSLetsEncryptCacheDir: absPath(viper.GetString("tls_letsencrypt_cache_dir")),
TLSLetsEncryptChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
TLSCertPath: absPath(viper.GetString("tls_cert_path")),
TLSKeyPath: absPath(viper.GetString("tls_key_path")),
DNSConfig: GetDNSConfig(),
}
h, err := headscale.NewHeadscale(cfg)
if err != nil {
return nil, err
}
// We are doing this here, as in the future it could be cool to also hot-reload the policy
if viper.GetString("acl_policy_path") != "" {
aclPath := absPath(viper.GetString("acl_policy_path"))
err = h.LoadACLPolicy(aclPath)
if err != nil {
log.Error().
Str("path", aclPath).
Err(err).
Msg("Could not load the ACL policy")
}
}
return h, nil
}
@@ -145,24 +216,24 @@ func JsonOutput(result interface{}, errResult error, outputFormat string) {
if errResult != nil {
j, err = json.MarshalIndent(ErrorOutput{errResult.Error()}, "", "\t")
if err != nil {
log.Fatalln(err)
log.Fatal().Err(err)
}
} else {
j, err = json.MarshalIndent(result, "", "\t")
if err != nil {
log.Fatalln(err)
log.Fatal().Err(err)
}
}
case "json-line":
if errResult != nil {
j, err = json.Marshal(ErrorOutput{errResult.Error()})
if err != nil {
log.Fatalln(err)
log.Fatal().Err(err)
}
} else {
j, err = json.Marshal(result)
if err != nil {
log.Fatalln(err)
log.Fatal().Err(err)
}
}
}
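
One caveat with the log.Fatalln replacements above (an editorial note, not something this diff addresses): zerolog events are lazy, so log.Fatal().Err(err) on its own builds an event but never writes it and never calls os.Exit; the event only fires once it is terminated with Msg, Msgf, or Send. A minimal sketch of the complete form:

```
package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

func main() {
	err := errors.New("marshalling failed")
	// Without the trailing Msg (or Msgf/Send) this line is a no-op:
	// nothing is logged and the process keeps running.
	log.Fatal().Err(err).Msg("could not marshal output")
}
```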


@@ -0,0 +1,27 @@
package cli
import (
"fmt"
"github.com/spf13/cobra"
"strings"
)
var version = "dev"
func init() {
rootCmd.AddCommand(versionCmd)
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version.",
Long: "The version of headscale.",
Run: func(cmd *cobra.Command, args []string) {
o, _ := cmd.Flags().GetString("output")
if strings.HasPrefix(o, "json") {
JsonOutput(map[string]string{"version": version}, nil, o)
return
}
fmt.Println(version)
},
}


@@ -1,93 +1,63 @@
package main
import (
"fmt"
"log"
"os"
"strings"
"time"
"github.com/efekarakus/termcolor"
"github.com/juanfont/headscale/cmd/headscale/cli"
"github.com/spf13/cobra"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/viper"
)
var version = "dev"
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version.",
Long: "The version of headscale.",
Run: func(cmd *cobra.Command, args []string) {
o, _ := cmd.Flags().GetString("output")
if strings.HasPrefix(o, "json") {
cli.JsonOutput(map[string]string{"version": version}, nil, o)
return
}
fmt.Println(version)
},
}
var headscaleCmd = &cobra.Command{
Use: "headscale",
Short: "headscale - a Tailscale control server",
Long: `
headscale is an open source implementation of the Tailscale control server
Juan Font Alonso <juanfontalonso@gmail.com> - 2021
https://gitlab.com/juanfont/headscale`,
}
func main() {
var colors bool
switch l := termcolor.SupportLevel(os.Stderr); l {
case termcolor.Level16M:
colors = true
case termcolor.Level256:
colors = true
case termcolor.LevelBasic:
colors = true
default:
// no color, return text as is.
colors = false
}
// Adhere to no-color.org manifesto of allowing users to
// turn off color in cli/services
if _, noColorIsSet := os.LookupEnv("NO_COLOR"); noColorIsSet {
colors = false
}
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
log.Logger = log.Output(zerolog.ConsoleWriter{
Out: os.Stdout,
TimeFormat: time.RFC3339,
NoColor: !colors,
})
err := cli.LoadConfig("")
if err != nil {
log.Fatalf(err.Error())
log.Fatal().Err(err)
}
headscaleCmd.AddCommand(cli.NamespaceCmd)
headscaleCmd.AddCommand(cli.NodeCmd)
headscaleCmd.AddCommand(cli.PreauthkeysCmd)
headscaleCmd.AddCommand(cli.RoutesCmd)
headscaleCmd.AddCommand(cli.ServeCmd)
headscaleCmd.AddCommand(versionCmd)
cli.NodeCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err = cli.NodeCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
logLevel := viper.GetString("log_level")
switch logLevel {
case "trace":
zerolog.SetGlobalLevel(zerolog.TraceLevel)
case "debug":
zerolog.SetGlobalLevel(zerolog.DebugLevel)
case "info":
zerolog.SetGlobalLevel(zerolog.InfoLevel)
case "warn":
zerolog.SetGlobalLevel(zerolog.WarnLevel)
case "error":
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
default:
zerolog.SetGlobalLevel(zerolog.DebugLevel)
}
cli.PreauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err = cli.PreauthkeysCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
cli.RoutesCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
err = cli.RoutesCmd.MarkPersistentFlagRequired("namespace")
if err != nil {
log.Fatalf(err.Error())
}
cli.NamespaceCmd.AddCommand(cli.CreateNamespaceCmd)
cli.NamespaceCmd.AddCommand(cli.ListNamespacesCmd)
cli.NamespaceCmd.AddCommand(cli.DestroyNamespaceCmd)
cli.NodeCmd.AddCommand(cli.ListNodesCmd)
cli.NodeCmd.AddCommand(cli.RegisterCmd)
cli.RoutesCmd.AddCommand(cli.ListRoutesCmd)
cli.RoutesCmd.AddCommand(cli.EnableRouteCmd)
cli.PreauthkeysCmd.AddCommand(cli.ListPreAuthKeys)
cli.PreauthkeysCmd.AddCommand(cli.CreatePreAuthKeyCmd)
cli.CreatePreAuthKeyCmd.PersistentFlags().Bool("reusable", false, "Make the preauthkey reusable")
cli.CreatePreAuthKeyCmd.PersistentFlags().Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
cli.CreatePreAuthKeyCmd.Flags().StringP("expiration", "e", "", "Human-readable expiration of the key (30m, 24h, 365d...)")
headscaleCmd.PersistentFlags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json' or 'json-line'")
if err := headscaleCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(-1)
}
cli.Execute()
}


@@ -51,13 +51,14 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
c.Assert(err, check.IsNil)
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8000")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8000")
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
c.Assert(viper.GetString("db_type"), check.Equals, "postgres")
c.Assert(viper.GetString("db_port"), check.Equals, "5432")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
}
func (*Suite) TestSqliteConfigLoading(c *check.C) {
@@ -83,13 +84,45 @@ func (*Suite) TestSqliteConfigLoading(c *check.C) {
c.Assert(err, check.IsNil)
// Test that config file was interpreted correctly
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8000")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8000")
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
c.Assert(viper.GetString("db_path"), check.Equals, "db.sqlite")
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
}
func (*Suite) TestDNSConfigLoading(c *check.C) {
tmpDir, err := ioutil.TempDir("", "headscale")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
path, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
// Symlink the example config file
err = os.Symlink(filepath.Clean(path+"/../../config.json.sqlite.example"), filepath.Join(tmpDir, "config.json"))
if err != nil {
c.Fatal(err)
}
// Load example config, it should load without validation errors
err = cli.LoadConfig(tmpDir)
c.Assert(err, check.IsNil)
dnsConfig := cli.GetDNSConfig()
fmt.Println(dnsConfig)
c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
}
func writeConfig(c *check.C, tmpDir string, configYaml []byte) {
@@ -123,9 +156,8 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
fmt.Println(tmp)
// Check configuration validation errors (2)
configYaml = []byte("---\nserver_url: \"http://127.0.0.1:8000\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"")
configYaml = []byte("---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"")
writeConfig(c, tmpDir, configYaml)
err = cli.LoadConfig(tmpDir)
c.Assert(err, check.NotNil)
c.Assert(err, check.ErrorMatches, "Fatal config error: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, listen_addr must end in :443.*")
c.Assert(err, check.IsNil)
}


@@ -1,6 +1,6 @@
{
"server_url": "http://127.0.0.1:8000",
"listen_addr": "0.0.0.0:8000",
"server_url": "http://127.0.0.1:8080",
"listen_addr": "0.0.0.0:8080",
"private_key_path": "private.key",
"derp_map_path": "derp.yaml",
"ephemeral_node_inactivity_timeout": "30m",
@@ -11,8 +11,15 @@
"db_user": "foo",
"db_pass": "bar",
"tls_letsencrypt_hostname": "",
"tls_letsencrypt_listen": ":http",
"tls_letsencrypt_cache_dir": ".cache",
"tls_letsencrypt_challenge_type": "HTTP-01",
"tls_cert_path": "",
"tls_key_path": ""
"tls_key_path": "",
"acl_policy_path": "",
"dns_config": {
"nameservers": [
"1.1.1.1"
]
}
}


@@ -1,14 +1,21 @@
{
"server_url": "http://127.0.0.1:8000",
"listen_addr": "0.0.0.0:8000",
"server_url": "http://127.0.0.1:8080",
"listen_addr": "0.0.0.0:8080",
"private_key_path": "private.key",
"derp_map_path": "derp.yaml",
"ephemeral_node_inactivity_timeout": "30m",
"db_type": "sqlite3",
"db_path": "db.sqlite",
"tls_letsencrypt_hostname": "",
"tls_letsencrypt_listen": ":http",
"tls_letsencrypt_cache_dir": ".cache",
"tls_letsencrypt_challenge_type": "HTTP-01",
"tls_cert_path": "",
"tls_key_path": ""
"tls_key_path": "",
"acl_policy_path": "",
"dns_config": {
"nameservers": [
"1.1.1.1"
]
}
}

db.go

@@ -3,9 +3,10 @@ package headscale
import (
"errors"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres" // sql driver
_ "github.com/jinzhu/gorm/dialects/sqlite" // sql driver
"gorm.io/driver/postgres"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/logger"
)
const dbVersion = "1"
@@ -17,63 +18,94 @@ type KV struct {
}
func (h *Headscale) initDB() error {
db, err := gorm.Open(h.dbType, h.dbString)
db, err := h.openDB()
if err != nil {
return err
}
h.db = db
if h.dbType == "postgres" {
db.Exec("create extension if not exists \"uuid-ossp\";")
}
db.AutoMigrate(&Machine{})
db.AutoMigrate(&KV{})
db.AutoMigrate(&Namespace{})
db.AutoMigrate(&PreAuthKey{})
db.Close()
err = db.AutoMigrate(&Machine{})
if err != nil {
return err
}
err = db.AutoMigrate(&KV{})
if err != nil {
return err
}
err = db.AutoMigrate(&Namespace{})
if err != nil {
return err
}
err = db.AutoMigrate(&PreAuthKey{})
if err != nil {
return err
}
err = db.AutoMigrate(&SharedMachine{})
if err != nil {
return err
}
err = h.setValue("db_version", dbVersion)
return err
}
func (h *Headscale) db() (*gorm.DB, error) {
db, err := gorm.Open(h.dbType, h.dbString)
func (h *Headscale) openDB() (*gorm.DB, error) {
var db *gorm.DB
var err error
var log logger.Interface
if h.dbDebug {
log = logger.Default
} else {
log = logger.Default.LogMode(logger.Silent)
}
switch h.dbType {
case "sqlite3":
db, err = gorm.Open(sqlite.Open(h.dbString), &gorm.Config{
DisableForeignKeyConstraintWhenMigrating: true,
Logger: log,
})
case "postgres":
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
DisableForeignKeyConstraintWhenMigrating: true,
Logger: log,
})
}
if err != nil {
return nil, err
}
if h.dbDebug {
db.LogMode(true)
}
return db, nil
}
// getValue returns the value for the given key in KV
func (h *Headscale) getValue(key string) (string, error) {
db, err := h.db()
if err != nil {
return "", err
}
defer db.Close()
var row KV
if db.First(&row, "key = ?", key).RecordNotFound() {
if result := h.db.First(&row, "key = ?", key); errors.Is(result.Error, gorm.ErrRecordNotFound) {
return "", errors.New("not found")
}
return row.Value, nil
}
// setValue sets value for the given key in KV
func (h *Headscale) setValue(key string, value string) error {
kv := KV{
Key: key,
Value: value,
}
db, err := h.db()
if err != nil {
return err
}
defer db.Close()
_, err = h.getValue(key)
_, err := h.getValue(key)
if err == nil {
db.Model(&kv).Where("key = ?", key).Update("value", value)
h.db.Model(&kv).Where("key = ?", key).Update("value", value)
return nil
}
db.Create(kv)
h.db.Create(kv)
return nil
}
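
The db.go rewrite moves from jinzhu/gorm to gorm.io v2: a single pooled connection now lives on h.db (no more per-call open/close), and the v1 RecordNotFound() helper is gone. A minimal sketch of the v2 lookup idiom, assuming the KV model from this file:

```
package headscale

import (
	"errors"

	"gorm.io/gorm"
)

// lookupVersion is a hypothetical helper: in gorm v2 a missing row is
// detected by testing result.Error against gorm.ErrRecordNotFound.
func lookupVersion(db *gorm.DB) (string, error) {
	var row KV
	if result := db.First(&row, "key = ?", "db_version"); errors.Is(result.Error, gorm.ErrRecordNotFound) {
		return "", errors.New("not found")
	}
	return row.Value, nil
}
```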


@@ -1,7 +1,7 @@
# This file contains some of the official Tailscale DERP servers,
# shamelessly taken from https://github.com/tailscale/tailscale/blob/main/derp/derpmap/derpmap.go
# shamelessly taken from https://github.com/tailscale/tailscale/blob/main/net/dnsfallback/dns-fallback-servers.json
#
# If you plan to somehow use headscale, please deploy your own DERP infra
# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
regions:
1:
regionid: 1
@@ -16,6 +16,14 @@ regions:
stunport: 0
stunonly: false
derptestport: 0
- name: 1b
regionid: 1
hostname: derp1b.tailscale.com
ipv4: 45.55.35.93
ipv6: "2604:a880:800:a1::f:2001"
stunport: 0
stunonly: false
derptestport: 0
2:
regionid: 2
regioncode: sfo
@@ -29,6 +37,14 @@ regions:
stunport: 0
stunonly: false
derptestport: 0
- name: 2b
regionid: 2
hostname: derp2b.tailscale.com
ipv4: 64.227.106.23
ipv6: "2604:a880:4:1d0::29:9000"
stunport: 0
stunonly: false
derptestport: 0
3:
regionid: 3
regioncode: sin
@@ -54,4 +70,77 @@ regions:
ipv6: "2a03:b0c0:3:e0::36e:900"
stunport: 0
stunonly: false
derptestport: 0
derptestport: 0
- name: 4b
regionid: 4
hostname: derp4b.tailscale.com
ipv4: 157.230.25.0
ipv6: "2a03:b0c0:3:e0::58f:3001"
stunport: 0
stunonly: false
derptestport: 0
5:
regionid: 5
regioncode: syd
regionname: Sydney
nodes:
- name: 5a
regionid: 5
hostname: derp5.tailscale.com
ipv4: 103.43.75.49
ipv6: "2001:19f0:5801:10b7:5400:2ff:feaa:284c"
stunport: 0
stunonly: false
derptestport: 0
6:
regionid: 6
regioncode: blr
regionname: Bangalore
nodes:
- name: 6a
regionid: 6
hostname: derp6.tailscale.com
ipv4: 68.183.90.120
ipv6: "2400:6180:100:d0::982:d001"
stunport: 0
stunonly: false
derptestport: 0
7:
regionid: 7
regioncode: tok
regionname: Tokyo
nodes:
- name: 7a
regionid: 7
hostname: derp7.tailscale.com
ipv4: 167.179.89.145
ipv6: "2401:c080:1000:467f:5400:2ff:feee:22aa"
stunport: 0
stunonly: false
derptestport: 0
8:
regionid: 8
regioncode: lhr
regionname: London
nodes:
- name: 8a
regionid: 8
hostname: derp8.tailscale.com
ipv4: 167.71.139.179
ipv6: "2a03:b0c0:1:e0::3cc:e001"
stunport: 0
stunonly: false
derptestport: 0
9:
regionid: 9
regioncode: sao
regionname: São Paulo
nodes:
- name: 9a
regionid: 9
hostname: derp9.tailscale.com
ipv4: 207.148.3.137
ipv6: "2001:19f0:6401:1d9c:5400:2ff:feef:bb82"
stunport: 0
stunonly: false
derptestport: 0


@@ -1,62 +0,0 @@
FROM golang:alpine
# Set the environment variables needed for our image
ENV GO111MODULE=on \
CGO_ENABLED=0 \
GOOS=linux \
GOARCH=amd64
ENV PATH /usr/lib/postgresql/$PG_MAJOR/bin:$PATH
ENV PGDATA /var/lib/postgresql/data
ENV POSTGRES_DB headscale
ENV POSTGRES_USER admin
ENV LANG en_US.utf8
RUN apk update && \
apk add git su-exec tzdata libpq postgresql-client postgresql postgresql-contrib gnupg supervisor inotify-tools wireguard-tools openssh && \
mkdir /docker-entrypoint-initdb.d && \
rm -rf /var/cache/apk/*
RUN gpg --keyserver ipv4.pool.sks-keyservers.net --recv-keys B42F6819007F00F88E364FD4036A9C25BF357DD4
RUN gpg --list-keys --fingerprint --with-colons | sed -E -n -e 's/^fpr:::::::::([0-9A-F]+):$/\1:6:/p' | gpg --import-ownertrust
RUN wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/1.7/gosu-amd64" && \
wget -O /usr/local/bin/gosu.asc "https://github.com/tianon/gosu/releases/download/1.7/gosu-amd64.asc" && \
gpg --verify /usr/local/bin/gosu.asc && \
rm /usr/local/bin/gosu.asc && \
chmod +x /usr/local/bin/gosu
RUN apk --purge del gnupg ca-certificates
VOLUME /var/lib/postgresql/data
RUN rm -rf /etc/ssh/ssh_host_rsa_key /etc/ssh/ssh_host_dsa_key
WORKDIR /build
RUN git clone https://github.com/juanfont/headscale.git
WORKDIR /build/headscale
RUN go build cmd/headscale/headscale.go
COPY headscale.sh /headscale.sh
COPY postgres.sh /postgres.sh
COPY supervisord.conf /etc/supervisord.conf
WORKDIR /
RUN mkdir -p /run/postgresql
RUN chown postgres:postgres /run/postgresql
RUN adduser -S headscale
#ENV GIN_MODE release
EXPOSE 8000
CMD ["supervisord","--nodaemon", "--configuration", "/etc/supervisord.conf"]


@@ -1,28 +0,0 @@
#!/bin/bash
cd /build/headscale
echo 'Writing config...'
echo '''
{
"server_url": "$SERVER_URL",
"listen_addr": "0.0.0.0:8000",
"private_key_path": "private.key",
"public_key_path": "public.key",
"db_host": "localhost",
"db_port": 5432,
"db_name": "headscale",
"db_user": "admin",
"db_pass": "$POSTGRES_PASSWORD"
}
''' > config.json
# Wait until PostgreSQL started and listens on port 5432.
while [ -z "`netstat -tln | grep 5432`" ]; do
echo 'Waiting for PostgreSQL to start ...'
sleep 1
done
echo 'PostgreSQL started.'
# Start server.
echo 'Starting server...'
./headscale


@@ -1,58 +0,0 @@
#!/bin/sh
chown -R postgres "$PGDATA"
if [ -z "$(ls -A "$PGDATA")" ]; then
gosu postgres initdb
sed -ri "s/^#(listen_addresses\s*=\s*)\S+/\1'*'/" "$PGDATA"/postgresql.conf
: ${POSTGRES_USER:="postgres"}
: ${POSTGRES_DB:=$POSTGRES_USER}
if [ "$POSTGRES_PASSWORD" ]; then
pass="PASSWORD '$POSTGRES_PASSWORD'"
authMethod=md5
else
echo "==============================="
echo "!!! NO PASSWORD SET !!! (Use \$POSTGRES_PASSWORD env var)"
echo "==============================="
pass=
authMethod=trust
fi
echo
if [ "$POSTGRES_DB" != 'postgres' ]; then
createSql="CREATE DATABASE $POSTGRES_DB;"
echo $createSql | gosu postgres postgres --single -jE
echo
fi
if [ "$POSTGRES_USER" != 'postgres' ]; then
op=CREATE
else
op=ALTER
fi
userSql="$op USER $POSTGRES_USER WITH SUPERUSER $pass;"
echo $userSql | gosu postgres postgres --single -jE
echo
gosu postgres pg_ctl -D "$PGDATA" \
-o "-c listen_addresses=''" \
-w start
echo
for f in /docker-entrypoint-initdb.d/*; do
case "$f" in
*.sh) echo "$0: running $f"; . "$f" ;;
*.sql) echo "$0: running $f"; psql --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" < "$f" && echo ;;
*) echo "$0: ignoring $f" ;;
esac
echo
done
gosu postgres pg_ctl -D "$PGDATA" -m fast -w stop
{ echo; echo "host all all 0.0.0.0/0 $authMethod"; } >> "$PGDATA"/pg_hba.conf
fi
exec gosu postgres postgres


@@ -1,4 +0,0 @@
# Example of how to use the docker image
POSTGRES_PASSWORD=
docker build . -t headscale-docker
docker run -p 8000:8000 -v $(pwd)/pgdata:/var/lib/postgresql/data -v "$(pwd)/private.key:/build/headscale/private.key" -v "$(pwd)/public.key:/build/headscale/public.key" -e SERVER_URL=127.0.0.1:8000 -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD -ti headscale-docker


@@ -1,13 +0,0 @@
[supervisord]
nodaemon=true
user = root
[program:headscale]
command=/bin/bash -c "/headscale.sh"
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
[program:postgres]
command=/bin/bash -c "/postgres.sh"
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0

go.mod

@@ -3,26 +3,37 @@ module github.com/juanfont/headscale
go 1.16
require (
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gin-gonic/gin v1.7.1
github.com/hako/durafmt v0.0.0-20210316092057-3a2c319c1acd
github.com/jinzhu/gorm v1.9.16
github.com/json-iterator/go v1.1.11 // indirect
github.com/klauspost/compress v1.12.2
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.1 // indirect
github.com/mattn/go-sqlite3 v1.14.7 // indirect
github.com/spf13/cobra v1.1.3
github.com/spf13/viper v1.7.1
github.com/stretchr/testify v1.7.0 // indirect
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
google.golang.org/appengine v1.6.7 // indirect
github.com/AlecAivazis/survey/v2 v2.3.2
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
github.com/containerd/continuity v0.1.0 // indirect
github.com/docker/cli v20.10.8+incompatible // indirect
github.com/docker/docker v20.10.8+incompatible // indirect
github.com/efekarakus/termcolor v1.0.1
github.com/gofrs/uuid v4.0.0+incompatible // indirect
github.com/gin-gonic/gin v1.7.4
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b
github.com/klauspost/compress v1.13.5
github.com/lib/pq v1.10.3 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/opencontainers/runc v1.0.2 // indirect
github.com/ory/dockertest/v3 v3.7.0
github.com/pterm/pterm v0.12.30
github.com/rs/zerolog v1.25.0
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.7.0
github.com/tailscale/hujson v0.0.0-20210818175511-7360507a6e88
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v2 v2.4.0
gorm.io/datatypes v1.0.1
inet.af/netaddr v0.0.0-20210511181906-37180328850c
tailscale.com v1.6.0
gorm.io/datatypes v1.0.2
gorm.io/driver/postgres v1.1.1
gorm.io/driver/sqlite v1.1.5
gorm.io/gorm v1.21.15
inet.af/netaddr v0.0.0-20210903134321-85fa6c94624e
tailscale.com v1.14.2
)

go.sum

File diff suppressed because it is too large

integration_test.go Normal file

@@ -0,0 +1,701 @@
//go:build integration
// +build integration
package headscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"testing"
"time"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/ipn/ipnstate"
"inet.af/netaddr"
)
var (
integrationTmpDir string
ih Headscale
)
var (
pool dockertest.Pool
network dockertest.Network
headscale dockertest.Resource
)
var tailscaleVersions = []string{"1.14.3", "1.12.3"}
type TestNamespace struct {
count int
tailscales map[string]dockertest.Resource
}
type IntegrationTestSuite struct {
suite.Suite
stats *suite.SuiteInformation
namespaces map[string]TestNamespace
}
func TestIntegrationTestSuite(t *testing.T) {
s := new(IntegrationTestSuite)
s.namespaces = map[string]TestNamespace{
"main": {
count: 20,
tailscales: make(map[string]dockertest.Resource),
},
"shared": {
count: 5,
tailscales: make(map[string]dockertest.Resource),
},
}
suite.Run(t, s)
// HandleStats, which allows us to check if we passed and save logs,
// is called after TearDown, so we cannot tear down containers before
// we have potentially saved the logs.
for _, scales := range s.namespaces {
for _, tailscale := range scales.tailscales {
if err := pool.Purge(&tailscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
}
}
if !s.stats.Passed() {
err := saveLog(&headscale, "test_output")
if err != nil {
log.Printf("Could not save log: %s\n", err)
}
}
if err := pool.Purge(&headscale); err != nil {
log.Printf("Could not purge resource: %s\n", err)
}
if err := network.Close(); err != nil {
log.Printf("Could not close network: %s\n", err)
}
}
func executeCommand(resource *dockertest.Resource, cmd []string, env []string) (string, error) {
var stdout bytes.Buffer
var stderr bytes.Buffer
exitCode, err := resource.Exec(
cmd,
dockertest.ExecOptions{
Env: env,
StdOut: &stdout,
StdErr: &stderr,
},
)
if err != nil {
return "", err
}
if exitCode != 0 {
fmt.Println("Command: ", cmd)
fmt.Println("stdout: ", stdout.String())
fmt.Println("stderr: ", stderr.String())
return "", fmt.Errorf("command failed with: %s", stderr.String())
}
return stdout.String(), nil
}
func saveLog(resource *dockertest.Resource, basePath string) error {
err := os.MkdirAll(basePath, os.ModePerm)
if err != nil {
return err
}
var stdout bytes.Buffer
var stderr bytes.Buffer
err = pool.Client.Logs(
docker.LogsOptions{
Context: context.TODO(),
Container: resource.Container.ID,
OutputStream: &stdout,
ErrorStream: &stderr,
Tail: "all",
RawTerminal: false,
Stdout: true,
Stderr: true,
Follow: false,
Timestamps: false,
},
)
if err != nil {
return err
}
fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stdout.log"), []byte(stdout.String()), 0o644)
if err != nil {
return err
}
err = ioutil.WriteFile(path.Join(basePath, resource.Container.Name+".stderr.log"), []byte(stderr.String()), 0o644)
if err != nil {
return err
}
return nil
}
func dockerRestartPolicy(config *docker.HostConfig) {
// set AutoRemove to true so that stopped containers go away by themselves
config.AutoRemove = true
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
}
func tailscaleContainer(namespace, identifier, version string) (string, *dockertest.Resource) {
tailscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile.tailscale",
ContextDir: ".",
BuildArgs: []docker.BuildArg{
{
Name: "TAILSCALE_VERSION",
Value: version,
},
},
}
hostname := fmt.Sprintf("%s-tailscale-%s-%s", namespace, strings.Replace(version, ".", "-", -1), identifier)
tailscaleOptions := &dockertest.RunOptions{
Name: hostname,
Networks: []*dockertest.Network{&network},
Cmd: []string{"tailscaled", "--tun=userspace-networking", "--socks5-server=localhost:1055"},
}
pts, err := pool.BuildAndRunWithBuildOptions(tailscaleBuildOptions, tailscaleOptions, dockerRestartPolicy)
if err != nil {
log.Fatalf("Could not start resource: %s", err)
}
fmt.Printf("Created %s container\n", hostname)
return hostname, pts
}
func (s *IntegrationTestSuite) SetupSuite() {
var err error
h = Headscale{
dbType: "sqlite3",
dbString: "integration_test_db.sqlite3",
}
if ppool, err := dockertest.NewPool(""); err == nil {
pool = *ppool
} else {
log.Fatalf("Could not connect to docker: %s", err)
}
if pnetwork, err := pool.CreateNetwork("headscale-test"); err == nil {
network = *pnetwork
} else {
log.Fatalf("Could not create network: %s", err)
}
headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: "Dockerfile",
ContextDir: ".",
}
currentPath, err := os.Getwd()
if err != nil {
log.Fatalf("Could not determine current path: %s", err)
}
headscaleOptions := &dockertest.RunOptions{
Name: "headscale",
Mounts: []string{
fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
fmt.Sprintf("%s/derp.yaml:/etc/headscale/derp.yaml", currentPath),
},
Networks: []*dockertest.Network{&network},
Cmd: []string{"headscale", "serve"},
PortBindings: map[docker.Port][]docker.PortBinding{
"8080/tcp": {{HostPort: "8080"}},
},
}
fmt.Println("Creating headscale container")
if pheadscale, err := pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, dockerRestartPolicy); err == nil {
headscale = *pheadscale
} else {
log.Fatalf("Could not start resource: %s", err)
}
fmt.Println("Created headscale container")
fmt.Println("Creating tailscale containers")
for namespace, scales := range s.namespaces {
for i := 0; i < scales.count; i++ {
version := tailscaleVersions[i%len(tailscaleVersions)]
hostname, container := tailscaleContainer(namespace, fmt.Sprint(i), version)
scales.tailscales[hostname] = *container
}
}
fmt.Println("Waiting for headscale to be ready")
hostEndpoint := fmt.Sprintf("localhost:%s", headscale.GetPort("8080/tcp"))
if err := pool.Retry(func() error {
url := fmt.Sprintf("http://%s/health", hostEndpoint)
resp, err := http.Get(url)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("status code not OK")
}
return nil
}); err != nil {
log.Fatalf("Could not connect to docker: %s", err)
}
fmt.Println("headscale container is ready")
for namespace, scales := range s.namespaces {
fmt.Printf("Creating headscale namespace: %s\n", namespace)
result, err := executeCommand(
&headscale,
[]string{"headscale", "namespaces", "create", namespace},
[]string{},
)
assert.Nil(s.T(), err)
fmt.Println("headscale create namespace result: ", result)
fmt.Printf("Creating pre auth key for %s\n", namespace)
authKey, err := executeCommand(
&headscale,
[]string{"headscale", "--namespace", namespace, "preauthkeys", "create", "--reusable", "--expiration", "24h"},
[]string{},
)
assert.Nil(s.T(), err)
headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))
fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
for hostname, tailscale := range scales.tailscales {
command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}
fmt.Println("Join command:", command)
fmt.Printf("Running join command for %s\n", hostname)
result, err := executeCommand(
&tailscale,
command,
[]string{},
)
fmt.Println("tailscale result: ", result)
assert.Nil(s.T(), err)
fmt.Printf("%s joined\n", hostname)
}
}
// The nodes need a bit of time to get their updated maps from headscale
// TODO: See if we can have a more deterministic wait here.
time.Sleep(60 * time.Second)
}
func (s *IntegrationTestSuite) TearDownSuite() {
}
func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
s.stats = stats
}
func (s *IntegrationTestSuite) TestListNodes() {
for namespace, scales := range s.namespaces {
fmt.Println("Listing nodes")
result, err := executeCommand(
&headscale,
[]string{"headscale", "--namespace", namespace, "nodes", "list"},
[]string{},
)
assert.Nil(s.T(), err)
fmt.Printf("List nodes: \n%s\n", result)
// Check that the correct number of hosts is present in the node list
lines := strings.Split(result, "\n")
assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)
for hostname := range scales.tailscales {
assert.Contains(s.T(), result, hostname)
}
}
}
func (s *IntegrationTestSuite) TestGetIpAddresses() {
for _, scales := range s.namespaces {
ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname := range scales.tailscales {
s.T().Run(hostname, func(t *testing.T) {
ip := ips[hostname]
fmt.Printf("IP for %s: %s\n", hostname, ip)
// c.Assert(ip.Valid(), check.IsTrue)
assert.True(t, ip.Is4())
assert.True(t, ipPrefix.Contains(ip))
ips[hostname] = ip
})
}
}
}
func (s *IntegrationTestSuite) TestStatus() {
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, tailscale := range scales.tailscales {
s.T().Run(hostname, func(t *testing.T) {
command := []string{"tailscale", "status", "--json"}
fmt.Printf("Getting status for %s\n", hostname)
result, err := executeCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(t, err)
var status ipnstate.Status
err = json.Unmarshal([]byte(result), &status)
assert.Nil(s.T(), err)
// TODO(kradalby): Replace this check with peer length of SAME namespace
// Check if we have as many nodes in status
// as we have IPs/tailscales
// lines := strings.Split(result, "\n")
// assert.Equal(t, len(ips), len(lines)-1)
// assert.Equal(t, len(scales.tailscales), len(lines)-1)
peerIps := getIPsfromIPNstate(status)
// Check that all hosts are present in every host's status
for ipHostname, ip := range ips {
if hostname != ipHostname {
assert.Contains(t, peerIps, ip)
}
}
})
}
}
}
func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
ips := make([]netaddr.IP, 0)
for _, peer := range status.Peer {
ips = append(ips, peer.TailscaleIPs...)
}
return ips
}
func (s *IntegrationTestSuite) TestPingAllPeers() {
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, tailscale := range scales.tailscales {
for peername, ip := range ips {
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
// We currently can't ping ourselves, so skip that.
if peername != hostname {
// We are only interested in a "direct ping", which means we
// might need a couple more attempts before reaching the node.
command := []string{
"tailscale", "ping",
"--timeout=1s",
"--c=20",
"--until-direct=true",
ip.String(),
}
fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
result, err := executeCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(t, err)
fmt.Printf("Result for %s: %s\n", hostname, result)
assert.Contains(t, result, "pong")
}
})
}
}
}
}
func (s *IntegrationTestSuite) TestSharedNodes() {
main := s.namespaces["main"]
shared := s.namespaces["shared"]
result, err := executeCommand(
&headscale,
[]string{"headscale", "nodes", "list", "-o", "json", "--namespace", "shared"},
[]string{},
)
assert.Nil(s.T(), err)
var machineList []Machine
err = json.Unmarshal([]byte(result), &machineList)
assert.Nil(s.T(), err)
for _, machine := range machineList {
result, err := executeCommand(
&headscale,
[]string{"headscale", "nodes", "share", "--namespace", "shared", fmt.Sprint(machine.ID), "main"},
[]string{},
)
assert.Nil(s.T(), err)
fmt.Println("Shared node with result: ", result)
}
result, err = executeCommand(
&headscale,
[]string{"headscale", "nodes", "list", "--namespace", "main"},
[]string{},
)
assert.Nil(s.T(), err)
fmt.Println("Nodelist after sharing", result)
// Check that the correct number of hosts is present in the node list
lines := strings.Split(result, "\n")
assert.Equal(s.T(), len(main.tailscales)+len(shared.tailscales), len(lines)-2)
for hostname := range main.tailscales {
assert.Contains(s.T(), result, hostname)
}
for hostname := range shared.tailscales {
assert.Contains(s.T(), result, hostname)
}
// TODO(kradalby): Figure out why these connections are not set up
// // TODO: See if we can have a more deterministic wait here.
// time.Sleep(100 * time.Second)
// mainIps, err := getIPs(main.tailscales)
// assert.Nil(s.T(), err)
// sharedIps, err := getIPs(shared.tailscales)
// assert.Nil(s.T(), err)
// for hostname, tailscale := range main.tailscales {
// for peername, ip := range sharedIps {
// s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
// // We currently can't ping ourselves, so skip that.
// if peername != hostname {
// // We are only interested in a "direct ping", which means we
// // might need a couple more attempts before reaching the node.
// command := []string{
// "tailscale", "ping",
// "--timeout=1s",
// "--c=20",
// "--until-direct=true",
// ip.String(),
// }
// fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, mainIps[hostname], peername, ip)
// result, err := executeCommand(
// &tailscale,
// command,
// )
// assert.Nil(t, err)
// fmt.Printf("Result for %s: %s\n", hostname, result)
// assert.Contains(t, result, "pong")
// }
// })
// }
// }
}
func (s *IntegrationTestSuite) TestTailDrop() {
for _, scales := range s.namespaces {
ips, err := getIPs(scales.tailscales)
assert.Nil(s.T(), err)
apiURLs, err := getAPIURLs(scales.tailscales)
assert.Nil(s.T(), err)
for hostname, tailscale := range scales.tailscales {
command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", hostname)}
_, err := executeCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(s.T(), err)
for peername, ip := range ips {
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
if peername != hostname {
// Under normal circumstances, we should be able to send a file
// using `tailscale file cp` - but not in userspace networking mode
// So curl!
peerAPI, ok := apiURLs[ip]
assert.True(t, ok)
// TODO(juanfont): We still have some issues with the test infrastructure, so
// let's run curl multiple times until it works.
attempts := 0
var err error
for {
command := []string{
"curl",
"--retry-connrefused",
"--retry-delay",
"30",
"--retry",
"10",
"--connect-timeout",
"60",
"-X",
"PUT",
"--upload-file",
fmt.Sprintf("/tmp/file_from_%s", hostname),
fmt.Sprintf("%s/v0/put/file_from_%s", peerAPI, hostname),
}
fmt.Printf("Sending file from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
_, err = executeCommand(
&tailscale,
command,
[]string{"ALL_PROXY=socks5://localhost:1055/"},
)
if err == nil {
break
} else {
time.Sleep(10 * time.Second)
attempts++
if attempts > 10 {
break
}
}
}
assert.Nil(t, err)
}
})
}
}
for hostname, tailscale := range scales.tailscales {
command := []string{
"tailscale", "file",
"get",
"/tmp/",
}
_, err := executeCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(s.T(), err)
for peername, ip := range ips {
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
if peername != hostname {
command := []string{
"ls",
fmt.Sprintf("/tmp/file_from_%s", peername),
}
fmt.Printf("Checking file in %s (%s) from %s (%s)\n", hostname, ips[hostname], peername, ip)
result, err := executeCommand(
&tailscale,
command,
[]string{},
)
assert.Nil(t, err)
fmt.Printf("Result for %s: %s\n", peername, result)
assert.Equal(t, result, fmt.Sprintf("/tmp/file_from_%s\n", peername))
}
})
}
}
}
}
func getIPs(tailscales map[string]dockertest.Resource) (map[string]netaddr.IP, error) {
ips := make(map[string]netaddr.IP)
for hostname, tailscale := range tailscales {
command := []string{"tailscale", "ip"}
result, err := executeCommand(
&tailscale,
command,
[]string{},
)
if err != nil {
return nil, err
}
ip, err := netaddr.ParseIP(strings.TrimSuffix(result, "\n"))
if err != nil {
return nil, err
}
ips[hostname] = ip
}
return ips, nil
}
func getAPIURLs(tailscales map[string]dockertest.Resource) (map[netaddr.IP]string, error) {
fts := make(map[netaddr.IP]string)
for _, tailscale := range tailscales {
command := []string{
"curl",
"--unix-socket",
"/run/tailscale/tailscaled.sock",
"http://localhost/localapi/v0/file-targets",
}
result, err := executeCommand(
&tailscale,
command,
[]string{},
)
if err != nil {
return nil, err
}
var pft []apitype.FileTarget
if err := json.Unmarshal([]byte(result), &pft); err != nil {
return nil, fmt.Errorf("invalid JSON: %w", err)
}
for _, ft := range pft {
n := ft.Node
for _, a := range n.Addresses { // just add all the addresses
if _, ok := fts[a.IP()]; !ok {
fts[a.IP()] = ft.PeerAPIURL
}
}
}
}
return fts, nil
}
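
getAPIURLs leans on the tailscaled LocalAPI: /localapi/v0/file-targets lists, for each reachable peer, the PeerAPI URL that Taildrop uploads are PUT to. A sketch of the payload shape it unmarshals (the JSON below is made up for illustration):

```
package headscale

import (
	"encoding/json"
	"fmt"

	"tailscale.com/client/tailscale/apitype"
)

// exampleFileTargets decodes a hand-written file-targets payload to
// show the fields getAPIURLs relies on.
func exampleFileTargets() {
	payload := []byte(`[
		{
			"Node": {"Name": "peer1", "Addresses": ["100.64.0.2/32"]},
			"PeerAPIURL": "http://100.64.0.2:43561"
		}
	]`)
	var fts []apitype.FileTarget
	if err := json.Unmarshal(payload, &fts); err != nil {
		panic(err)
	}
	fmt.Println(fts[0].Node.Name, "->", fts[0].PeerAPIURL)
}
```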

integration_test/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
derp.yaml
*.sqlite
*.sqlite3


@@ -0,0 +1,11 @@
{
"server_url": "http://headscale:8080",
"listen_addr": "0.0.0.0:8080",
"private_key_path": "private.key",
"derp_map_path": "derp.yaml",
"ephemeral_node_inactivity_timeout": "30m",
"db_type": "sqlite3",
"db_path": "/tmp/integration_test_db.sqlite3",
"acl_policy_path": "",
"log_level": "debug"
}


@@ -0,0 +1 @@
SEmQwCu+tGywQWEUsf93TpTRUvlB7WhnCdHgWrSXjEA=

k8s/.gitignore vendored Normal file

@@ -0,0 +1,2 @@
/**/site
/**/secrets

k8s/README.md Normal file

@@ -0,0 +1,97 @@
# Deploying Headscale on Kubernetes
This directory contains [Kustomize](https://kustomize.io) templates that deploy
Headscale in various configurations.
These templates currently support Rancher k3s. Other clusters may require
adaptation, especially around volume claims and ingress.
Commands below assume this directory is your current working directory.
# Generate secrets and site configuration
Run `./init.bash` to generate keys, passwords, and site configuration files.
Edit `base/site/public.env`, changing `public-hostname` to the public DNS name
that will be used for your headscale deployment.
Set `public-proto` to "https" if you're planning to use TLS & Let's Encrypt.
Configure DERP servers by editing `base/site/derp.yaml` if needed.
# Add the image to the registry
You'll somehow need to get `headscale:latest` into your cluster image registry.
An easy way to do this with k3s:
- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`)
- `docker build -t headscale:latest ..` from here
# Create the namespace
If it doesn't already exist, `kubectl create ns headscale`.
# Deploy headscale
## sqlite
`kubectl -n headscale apply -k ./sqlite`
## postgres
`kubectl -n headscale apply -k ./postgres`
# TLS & Let's Encrypt
Test a staging certificate with your configured DNS name and Let's Encrypt.
`kubectl -n headscale apply -k ./staging-tls`
Replace with a production certificate.
`kubectl -n headscale apply -k ./production-tls`
## Static / custom TLS certificates
Only Let's Encrypt is supported. If you need other TLS settings, modify or patch the ingress.
# Administration
Use the wrapper script to remotely operate headscale to perform administrative
tasks like creating namespaces, authkeys, etc.
```
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash
headscale is an open source implementation of the Tailscale control server
https://gitlab.com/juanfont/headscale
Usage:
headscale [command]
Available Commands:
help Help about any command
namespace Manage the namespaces of Headscale
node Manage the nodes of Headscale
preauthkey Handle the preauthkeys in Headscale
routes Manage the routes of Headscale
serve Launches the headscale server
version Print the version.
Flags:
-h, --help help for headscale
-o, --output string Output format. Empty for human-readable, 'json' or 'json-line'
Use "headscale [command] --help" for more information about a command.
```
# TODO / Ideas
- Interpolate `email:` option to the ClusterIssuer from site configuration.
This probably needs to be done with a transformer; kustomize vars don't seem to work.
- Add kustomize examples for cloud-native ingress, load balancer
- CockroachDB for the backend
- DERP server deployment
- Tor hidden service

k8s/base/configmap.yaml Normal file

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: headscale-config
data:
server_url: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
listen_addr: "0.0.0.0:8080"
ephemeral_node_inactivity_timeout: "30m"

k8s/base/ingress.yaml Normal file

@@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: headscale
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: $(PUBLIC_HOSTNAME)
http:
paths:
- backend:
service:
name: headscale
port:
number: 8080
path: /
pathType: Prefix


@@ -0,0 +1,42 @@
namespace: headscale
resources:
- configmap.yaml
- ingress.yaml
- service.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- name: headscale-site
files:
- derp.yaml=site/derp.yaml
envs:
- site/public.env
- name: headscale-etc
literals:
- config.json={}
secretGenerator:
- name: headscale
files:
- secrets/private-key
vars:
- name: PUBLIC_PROTO
objRef:
kind: ConfigMap
name: headscale-site
apiVersion: v1
fieldRef:
fieldPath: data.public-proto
- name: PUBLIC_HOSTNAME
objRef:
kind: ConfigMap
name: headscale-site
apiVersion: v1
fieldRef:
fieldPath: data.public-hostname
- name: CONTACT_EMAIL
objRef:
kind: ConfigMap
name: headscale-site
apiVersion: v1
fieldRef:
fieldPath: data.contact-email

k8s/base/service.yaml Normal file

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: headscale
labels:
app: headscale
spec:
selector:
app: headscale
ports:
- name: http
targetPort: http
port: 8080

k8s/headscale.bash Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
set -eu
exec kubectl -n headscale exec -ti pod/headscale-0 -- /go/bin/headscale "$@"

k8s/init.bash Executable file

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -eux
cd $(dirname $0)
umask 022
mkdir -p base/site/
[ ! -e base/site/public.env ] && (
cat >base/site/public.env <<EOF
public-hostname=localhost
public-proto=http
contact-email=headscale@example.com
EOF
)
[ ! -e base/site/derp.yaml ] && cp ../derp.yaml base/site/derp.yaml
umask 077
mkdir -p base/secrets/
[ ! -e base/secrets/private-key ] && (
wg genkey > base/secrets/private-key
)
mkdir -p postgres/secrets/
[ ! -e postgres/secrets/password ] && (head -c 32 /dev/urandom | base64 -w0 > postgres/secrets/password)

k8s/install-cert-manager.bash Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
set -eux
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.4.0/cert-manager.yaml


@@ -0,0 +1,78 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: headscale
spec:
replicas: 2
selector:
matchLabels:
app: headscale
template:
metadata:
labels:
app: headscale
spec:
containers:
- name: headscale
image: "headscale:latest"
imagePullPolicy: IfNotPresent
command: ["/go/bin/headscale", "serve"]
env:
- name: SERVER_URL
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
- name: LISTEN_ADDR
valueFrom:
configMapKeyRef:
name: headscale-config
key: listen_addr
- name: PRIVATE_KEY_PATH
value: /vol/secret/private-key
- name: DERP_MAP_PATH
value: /vol/config/derp.yaml
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
valueFrom:
configMapKeyRef:
name: headscale-config
key: ephemeral_node_inactivity_timeout
- name: DB_TYPE
value: postgres
- name: DB_HOST
value: postgres.headscale.svc.cluster.local
- name: DB_PORT
value: "5432"
- name: DB_USER
value: headscale
- name: DB_PASS
valueFrom:
secretKeyRef:
name: postgresql
key: password
- name: DB_NAME
value: headscale
ports:
- name: http
protocol: TCP
containerPort: 8080
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 15
volumeMounts:
- name: config
mountPath: /vol/config
- name: secret
mountPath: /vol/secret
- name: etc
mountPath: /etc/headscale
volumes:
- name: config
configMap:
name: headscale-site
- name: etc
configMap:
name: headscale-etc
- name: secret
secret:
secretName: headscale


@@ -0,0 +1,13 @@
namespace: headscale
bases:
- ../base
resources:
- deployment.yaml
- postgres-service.yaml
- postgres-statefulset.yaml
generatorOptions:
disableNameSuffixHash: true
secretGenerator:
- name: postgresql
files:
- secrets/password


@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
name: postgres
labels:
app: postgres
spec:
selector:
app: postgres
ports:
- name: postgres
targetPort: postgres
port: 5432


@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
spec:
serviceName: postgres
replicas: 1
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
containers:
- name: postgres
image: "postgres:13"
imagePullPolicy: IfNotPresent
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: postgresql
key: password
- name: POSTGRES_USER
value: headscale
ports:
- name: postgres
protocol: TCP
containerPort: 5432
livenessProbe:
tcpSocket:
port: 5432
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 15
volumeMounts:
- name: pgdata
mountPath: /var/lib/postgresql/data
volumeClaimTemplates:
- metadata:
name: pgdata
spec:
storageClassName: local-path
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi


@@ -0,0 +1,11 @@
kind: Ingress
metadata:
name: headscale
annotations:
cert-manager.io/cluster-issuer: letsencrypt-production
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- $(PUBLIC_HOSTNAME)
secretName: production-cert


@@ -0,0 +1,9 @@
namespace: headscale
bases:
- ../base
resources:
- production-issuer.yaml
patches:
- path: ingress-patch.yaml
target:
kind: Ingress


@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-production
spec:
acme:
# TODO: figure out how to get kustomize to interpolate this, or use a transformer
#email: $(CONTACT_EMAIL)
server: https://acme-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource used to store the account's private key.
name: letsencrypt-production-acc-key
solvers:
- http01:
ingress:
class: traefik


@@ -0,0 +1,5 @@
namespace: headscale
bases:
- ../base
resources:
- statefulset.yaml


@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: headscale
spec:
serviceName: headscale
replicas: 1
selector:
matchLabels:
app: headscale
template:
metadata:
labels:
app: headscale
spec:
containers:
- name: headscale
image: "headscale:latest"
imagePullPolicy: IfNotPresent
command: ["/go/bin/headscale", "serve"]
env:
- name: SERVER_URL
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
- name: LISTEN_ADDR
valueFrom:
configMapKeyRef:
name: headscale-config
key: listen_addr
- name: PRIVATE_KEY_PATH
value: /vol/secret/private-key
- name: DERP_MAP_PATH
value: /vol/config/derp.yaml
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
valueFrom:
configMapKeyRef:
name: headscale-config
key: ephemeral_node_inactivity_timeout
- name: DB_TYPE
value: sqlite3
- name: DB_PATH
value: /vol/data/db.sqlite
ports:
- name: http
protocol: TCP
containerPort: 8080
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: 30
timeoutSeconds: 5
periodSeconds: 15
volumeMounts:
- name: config
mountPath: /vol/config
- name: data
mountPath: /vol/data
- name: secret
mountPath: /vol/secret
- name: etc
mountPath: /etc/headscale
volumes:
- name: config
configMap:
name: headscale-site
- name: etc
configMap:
name: headscale-etc
- name: secret
secret:
secretName: headscale
volumeClaimTemplates:
- metadata:
name: data
spec:
storageClassName: local-path
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi


@@ -0,0 +1,11 @@
kind: Ingress
metadata:
name: headscale
annotations:
cert-manager.io/cluster-issuer: letsencrypt-staging
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
tls:
- hosts:
- $(PUBLIC_HOSTNAME)
secretName: staging-cert


@@ -0,0 +1,9 @@
namespace: headscale
bases:
- ../base
resources:
- staging-issuer.yaml
patches:
- path: ingress-patch.yaml
target:
kind: Ingress


@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# TODO: figure out how to get kustomize to interpolate this, or use a transformer
#email: $(CONTACT_EMAIL)
server: https://acme-staging-v02.api.letsencrypt.org/directory
privateKeySecretRef:
# Secret resource used to store the account's private key.
name: letsencrypt-staging-acc-key
solvers:
- http01:
ingress:
class: traefik


@@ -2,16 +2,18 @@ package headscale
import (
"encoding/json"
"errors"
"fmt"
"log"
"sort"
"strconv"
"time"
"github.com/rs/zerolog/log"
"gorm.io/datatypes"
"inet.af/netaddr"
"tailscale.com/tailcfg"
"tailscale.com/wgengine/wgcfg"
"tailscale.com/types/wgkey"
)
// Machine is a Headscale client
@@ -23,15 +25,16 @@ type Machine struct {
IPAddress string
Name string
NamespaceID uint
Namespace Namespace
Namespace Namespace `gorm:"foreignKey:NamespaceID"`
Registered bool // temp
RegisterMethod string
AuthKeyID uint
AuthKey *PreAuthKey
LastSeen *time.Time
Expiry *time.Time
LastSeen *time.Time
LastSuccessfulUpdate *time.Time
Expiry *time.Time
HostInfo datatypes.JSON
Endpoints datatypes.JSON
@@ -47,19 +50,21 @@ func (m Machine) isAlreadyRegistered() bool {
return m.Registered
}
func (m Machine) toNode() (*tailcfg.Node, error) {
nKey, err := wgcfg.ParseHexKey(m.NodeKey)
// toNode converts a Machine into a Tailscale Node. includeRoutes is false for shared nodes
// as per the expected behaviour in the official SaaS
func (m Machine) toNode(includeRoutes bool) (*tailcfg.Node, error) {
nKey, err := wgkey.ParseHex(m.NodeKey)
if err != nil {
return nil, err
}
mKey, err := wgcfg.ParseHexKey(m.MachineKey)
mKey, err := wgkey.ParseHex(m.MachineKey)
if err != nil {
return nil, err
}
var discoKey tailcfg.DiscoKey
if m.DiscoKey != "" {
dKey, err := wgcfg.ParseHexKey(m.DiscoKey)
dKey, err := wgkey.ParseHex(m.DiscoKey)
if err != nil {
return nil, err
}
@@ -71,6 +76,10 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
addrs := []netaddr.IPPrefix{}
ip, err := netaddr.ParseIPPrefix(fmt.Sprintf("%s/32", m.IPAddress))
if err != nil {
log.Trace().
Str("func", "toNode").
Str("ip", m.IPAddress).
Msgf("Failed to parse IP Prefix from IP: %s", m.IPAddress)
return nil, err
}
addrs = append(addrs, ip) // missing the ipv6 ?
@@ -78,24 +87,26 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
allowedIPs := []netaddr.IPPrefix{}
allowedIPs = append(allowedIPs, ip) // we append the node own IP, as it is required by the clients
routesStr := []string{}
if len(m.EnabledRoutes) != 0 {
allwIps, err := m.EnabledRoutes.MarshalJSON()
if err != nil {
return nil, err
if includeRoutes {
routesStr := []string{}
if len(m.EnabledRoutes) != 0 {
allwIps, err := m.EnabledRoutes.MarshalJSON()
if err != nil {
return nil, err
}
err = json.Unmarshal(allwIps, &routesStr)
if err != nil {
return nil, err
}
}
err = json.Unmarshal(allwIps, &routesStr)
if err != nil {
return nil, err
}
}
for _, aip := range routesStr {
ip, err := netaddr.ParseIPPrefix(aip)
if err != nil {
return nil, err
for _, routeStr := range routesStr {
ip, err := netaddr.ParseIPPrefix(routeStr)
if err != nil {
return nil, err
}
allowedIPs = append(allowedIPs, ip)
}
allowedIPs = append(allowedIPs, ip)
}
endpoints := []string{}
@@ -129,13 +140,20 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
derp = "127.3.3.40:0" // Zero means disconnected or unknown.
}
var keyExpiry time.Time
if m.Expiry != nil {
keyExpiry = *m.Expiry
} else {
keyExpiry = time.Time{}
}
n := tailcfg.Node{
ID: tailcfg.NodeID(m.ID), // this is the actual ID
StableID: tailcfg.StableNodeID(strconv.FormatUint(m.ID, 10)), // in headscale, unlike tailcontrol server, IDs are permanent
Name: hostinfo.Hostname,
User: tailcfg.UserID(m.NamespaceID),
Key: tailcfg.NodeKey(nKey),
KeyExpiry: *m.Expiry,
KeyExpiry: keyExpiry,
Machine: tailcfg.MachineKey(mKey),
DiscoKey: discoKey,
Addresses: addrs,
@@ -149,34 +167,52 @@ func (m Machine) toNode() (*tailcfg.Node, error) {
KeepAlive: true,
MachineAuthorized: m.Registered,
Capabilities: []string{tailcfg.CapabilityFileSharing},
}
return &n, nil
}
func (h *Headscale) getPeers(m Machine) (*[]*tailcfg.Node, error) {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
log.Trace().
Str("func", "getPeers").
Str("machine", m.Name).
Msg("Finding peers")
machines := []Machine{}
if err = db.Where("namespace_id = ? AND machine_key <> ? AND registered",
if err := h.db.Where("namespace_id = ? AND machine_key <> ? AND registered",
m.NamespaceID, m.MachineKey).Find(&machines).Error; err != nil {
log.Printf("Error accessing db: %s", err)
log.Error().Err(err).Msg("Error accessing db")
return nil, err
}
// Here we fetch the machines that are shared with the `Namespace` of the machine we are getting peers for
sharedMachines := []SharedMachine{}
if err := h.db.Preload("Namespace").Preload("Machine").Where("namespace_id = ?",
m.NamespaceID).Find(&sharedMachines).Error; err != nil {
return nil, err
}
peers := []*tailcfg.Node{}
for _, mn := range machines {
peer, err := mn.toNode()
peer, err := mn.toNode(true)
if err != nil {
return nil, err
}
peers = append(peers, peer)
}
for _, sharedMachine := range sharedMachines {
peer, err := sharedMachine.Machine.toNode(false) // shared nodes do not expose their routes
if err != nil {
return nil, err
}
peers = append(peers, peer)
}
sort.Slice(peers, func(i, j int) bool { return peers[i].ID < peers[j].ID })
log.Trace().
Str("func", "getPeers").
Str("machine", m.Name).
Msgf("Found peers: %s", tailNodesToString(peers))
return &peers, nil
}
@@ -192,7 +228,46 @@ func (h *Headscale) GetMachine(namespace string, name string) (*Machine, error)
return &m, nil
}
}
return nil, fmt.Errorf("not found")
return nil, fmt.Errorf("machine not found")
}
// GetMachineByID finds a Machine by ID and returns the Machine struct
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
return nil, result.Error
}
return &m, nil
}
// UpdateMachine takes a Machine struct pointer (typically already loaded from the database)
// and updates it with the latest data from the database.
func (h *Headscale) UpdateMachine(m *Machine) error {
if result := h.db.Find(m).First(&m); result.Error != nil {
return result.Error
}
return nil
}
// DeleteMachine soft-deletes a Machine from the database
func (h *Headscale) DeleteMachine(m *Machine) error {
m.Registered = false
namespaceID := m.NamespaceID
h.db.Save(&m) // we mark it as unregistered, just in case
if err := h.db.Delete(&m).Error; err != nil {
return err
}
return h.RequestMapUpdates(namespaceID)
}
// HardDeleteMachine hard deletes a Machine from the database
func (h *Headscale) HardDeleteMachine(m *Machine) error {
namespaceID := m.NamespaceID
if err := h.db.Unscoped().Delete(&m).Error; err != nil {
return err
}
return h.RequestMapUpdates(namespaceID)
}
// GetHostInfo returns a Hostinfo struct for the machine
@@ -210,3 +285,121 @@ func (m *Machine) GetHostInfo() (*tailcfg.Hostinfo, error) {
}
return &hostinfo, nil
}
func (h *Headscale) notifyChangesToPeers(m *Machine) {
peers, err := h.getPeers(*m)
if err != nil {
log.Error().
Str("func", "notifyChangesToPeers").
Str("machine", m.Name).
Msgf("Error getting peers: %s", err)
return
}
for _, p := range *peers {
log.Info().
Str("func", "notifyChangesToPeers").
Str("machine", m.Name).
Str("peer", p.Name).
Str("address", p.Addresses[0].String()).
Msgf("Notifying peer %s (%s)", p.Name, p.Addresses[0])
err := h.sendRequestOnUpdateChannel(p)
if err != nil {
log.Info().
Str("func", "notifyChangesToPeers").
Str("machine", m.Name).
Str("peer", p.Name).
Msgf("Peer %s does not appear to be polling", p.Name)
}
log.Trace().
Str("func", "notifyChangesToPeers").
Str("machine", m.Name).
Str("peer", p.Name).
Str("address", p.Addresses[0].String()).
Msgf("Notified peer %s (%s)", p.Name, p.Addresses[0])
}
}
func (h *Headscale) getOrOpenUpdateChannel(m *Machine) <-chan struct{} {
var updateChan chan struct{}
if storedChan, ok := h.clientsUpdateChannels.Load(m.ID); ok {
if unwrapped, ok := storedChan.(chan struct{}); ok {
updateChan = unwrapped
} else {
log.Error().
Str("handler", "openUpdateChannel").
Str("machine", m.Name).
Msg("Failed to convert update channel to struct{}")
}
} else {
log.Debug().
Str("handler", "openUpdateChannel").
Str("machine", m.Name).
Msg("Update channel not found, creating")
updateChan = make(chan struct{})
h.clientsUpdateChannels.Store(m.ID, updateChan)
}
return updateChan
}
func (h *Headscale) closeUpdateChannel(m *Machine) {
h.clientsUpdateChannelMutex.Lock()
defer h.clientsUpdateChannelMutex.Unlock()
if storedChan, ok := h.clientsUpdateChannels.Load(m.ID); ok {
if unwrapped, ok := storedChan.(chan struct{}); ok {
close(unwrapped)
}
}
h.clientsUpdateChannels.Delete(m.ID)
}
func (h *Headscale) sendRequestOnUpdateChannel(m *tailcfg.Node) error {
h.clientsUpdateChannelMutex.Lock()
defer h.clientsUpdateChannelMutex.Unlock()
pUp, ok := h.clientsUpdateChannels.Load(uint64(m.ID))
if ok {
log.Info().
Str("func", "requestUpdate").
Str("machine", m.Name).
Msgf("Notifying peer %s", m.Name)
if update, ok := pUp.(chan struct{}); ok {
log.Trace().
Str("func", "requestUpdate").
Str("machine", m.Name).
Msgf("Update channel is %#v", update)
update <- struct{}{}
log.Trace().
Str("func", "requestUpdate").
Str("machine", m.Name).
Msgf("Notified machine %s", m.Name)
}
} else {
log.Info().
Str("func", "requestUpdate").
Str("machine", m.Name).
Msgf("Machine %s does not appear to be polling", m.Name)
return errors.New("machine does not seem to be polling")
}
return nil
}
func (h *Headscale) isOutdated(m *Machine) bool {
err := h.UpdateMachine(m)
if err != nil {
return true
}
lastChange := h.getLastStateChange(m.Namespace.Name)
log.Trace().
Str("func", "keepAlive").
Str("machine", m.Name).
Time("last_successful_update", *m.LastSuccessfulUpdate).
Time("last_state_change", lastChange).
Msgf("Checking if %s is missing updates", m.Name)
return m.LastSuccessfulUpdate.Before(lastChange)
}
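
The keyExpiry change above exists because dereferencing *m.Expiry panics when a machine has no expiry set. A minimal, self-contained sketch of the guarded pattern (the trimmed machine struct here is illustrative, not headscale's real model):

package main

import (
	"fmt"
	"time"
)

type machine struct {
	Expiry *time.Time // nil when the node key never expires
}

// keyExpiry mirrors the nil guard added in toNode: dereferencing
// m.Expiry directly would panic for machines without an expiry.
func keyExpiry(m machine) time.Time {
	if m.Expiry != nil {
		return *m.Expiry
	}
	return time.Time{} // the zero value stands for "no expiry"
}

func main() {
	never := machine{}
	soon := time.Now().Add(time.Hour)
	expiring := machine{Expiry: &soon}
	fmt.Println(keyExpiry(never).IsZero())    // true
	fmt.Println(keyExpiry(expiring).IsZero()) // false
}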

View File

@@ -1,6 +1,8 @@
package headscale
import (
"encoding/json"
"gopkg.in/check.v1"
)
@@ -11,12 +13,6 @@ func (s *Suite) TestGetMachine(c *check.C) {
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
_, err = h.GetMachine("test", "testmachine")
c.Assert(err, check.NotNil)
@@ -31,12 +27,92 @@ func (s *Suite) TestGetMachine(c *check.C) {
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
db.Save(&m)
h.db.Save(&m)
m1, err := h.GetMachine("test", "testmachine")
c.Assert(err, check.IsNil)
_, err = m1.GetHostInfo()
c.Assert(err, check.IsNil)
}
func (s *Suite) TestGetMachineByID(c *check.C) {
n, err := h.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachineByID(0)
c.Assert(err, check.NotNil)
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
h.db.Save(&m)
m1, err := h.GetMachineByID(0)
c.Assert(err, check.IsNil)
_, err = m1.GetHostInfo()
c.Assert(err, check.IsNil)
}
func (s *Suite) TestDeleteMachine(c *check.C) {
n, err := h.CreateNamespace("test")
c.Assert(err, check.IsNil)
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(1),
}
h.db.Save(&m)
err = h.DeleteMachine(&m)
c.Assert(err, check.IsNil)
v, err := h.getValue("namespaces_pending_updates")
c.Assert(err, check.IsNil)
names := []string{}
err = json.Unmarshal([]byte(v), &names)
c.Assert(err, check.IsNil)
c.Assert(names, check.DeepEquals, []string{n.Name})
h.checkForNamespacesPendingUpdates()
v, _ = h.getValue("namespaces_pending_updates")
c.Assert(v, check.Equals, "")
_, err = h.GetMachine(n.Name, "testmachine")
c.Assert(err, check.NotNil)
}
func (s *Suite) TestHardDeleteMachine(c *check.C) {
n, err := h.CreateNamespace("test")
c.Assert(err, check.IsNil)
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine3",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(1),
}
h.db.Save(&m)
err = h.HardDeleteMachine(&m)
c.Assert(err, check.IsNil)
_, err = h.GetMachine(n.Name, "testmachine3")
c.Assert(err, check.NotNil)
}

View File

@@ -1,10 +1,13 @@
package headscale
import (
"log"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/jinzhu/gorm"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/tailcfg"
)
@@ -24,20 +27,16 @@ type Namespace struct {
// CreateNamespace creates a new Namespace. Returns an error if it could not be created
// or if another namespace with that name already exists
func (h *Headscale) CreateNamespace(name string) (*Namespace, error) {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
n := Namespace{}
if err := db.Where("name = ?", name).First(&n).Error; err == nil {
if err := h.db.Where("name = ?", name).First(&n).Error; err == nil {
return nil, errorNamespaceExists
}
n.Name = name
if err := db.Create(&n).Error; err != nil {
log.Printf("Could not create row: %s", err)
if err := h.db.Create(&n).Error; err != nil {
log.Error().
Str("func", "CreateNamespace").
Err(err).
Msg("Could not create row")
return nil, err
}
return &n, nil
@@ -46,13 +45,6 @@ func (h *Headscale) CreateNamespace(name string) (*Namespace, error) {
// DestroyNamespace destroys a Namespace. Returns an error if the Namespace does
// not exist or if there are machines associated with it.
func (h *Headscale) DestroyNamespace(name string) error {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return err
}
defer db.Close()
n, err := h.GetNamespace(name)
if err != nil {
return errorNamespaceNotFound
@@ -66,8 +58,7 @@ func (h *Headscale) DestroyNamespace(name string) error {
return errorNamespaceNotEmpty
}
err = db.Unscoped().Delete(&n).Error
if err != nil {
if result := h.db.Unscoped().Delete(&n); result.Error != nil {
return err
}
@@ -76,15 +67,8 @@ func (h *Headscale) DestroyNamespace(name string) error {
// GetNamespace fetches a namespace by name
func (h *Headscale) GetNamespace(name string) (*Namespace, error) {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
n := Namespace{}
if db.First(&n, "name = ?", name).RecordNotFound() {
if result := h.db.First(&n, "name = ?", name); errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, errorNamespaceNotFound
}
return &n, nil
@@ -92,14 +76,8 @@ func (h *Headscale) GetNamespace(name string) (*Namespace, error) {
// ListNamespaces gets all the existing namespaces
func (h *Headscale) ListNamespaces() (*[]Namespace, error) {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
namespaces := []Namespace{}
if err := db.Find(&namespaces).Error; err != nil {
if err := h.db.Find(&namespaces).Error; err != nil {
return nil, err
}
return &namespaces, nil
@@ -111,46 +89,135 @@ func (h *Headscale) ListMachinesInNamespace(name string) (*[]Machine, error) {
if err != nil {
return nil, err
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
machines := []Machine{}
if err := db.Preload("AuthKey").Where(&Machine{NamespaceID: n.ID}).Find(&machines).Error; err != nil {
if err := h.db.Preload("AuthKey").Preload("Namespace").Where(&Machine{NamespaceID: n.ID}).Find(&machines).Error; err != nil {
return nil, err
}
return &machines, nil
}
// ListSharedMachinesInNamespace returns all the machines that are shared to the specified namespace
func (h *Headscale) ListSharedMachinesInNamespace(name string) (*[]Machine, error) {
namespace, err := h.GetNamespace(name)
if err != nil {
return nil, err
}
sharedMachines := []SharedMachine{}
if err := h.db.Preload("Namespace").Where(&SharedMachine{NamespaceID: namespace.ID}).Find(&sharedMachines).Error; err != nil {
return nil, err
}
machines := []Machine{}
for _, sharedMachine := range sharedMachines {
machine, err := h.GetMachineByID(sharedMachine.MachineID) // fetched by ID so that all related fields come populated
if err != nil {
return nil, err
}
machines = append(machines, *machine)
}
return &machines, nil
}
// SetMachineNamespace assigns a Machine to a namespace
func (h *Headscale) SetMachineNamespace(m *Machine, namespaceName string) error {
n, err := h.GetNamespace(namespaceName)
if err != nil {
return err
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
m.NamespaceID = n.ID
h.db.Save(&m)
return nil
}
// RequestMapUpdates signals the KV worker to update the maps for this namespace
func (h *Headscale) RequestMapUpdates(namespaceID uint) error {
namespace := Namespace{}
if err := h.db.First(&namespace, namespaceID).Error; err != nil {
return err
}
defer db.Close()
m.NamespaceID = n.ID
db.Save(&m)
return nil
v, err := h.getValue("namespaces_pending_updates")
if err != nil || v == "" {
err = h.setValue("namespaces_pending_updates", fmt.Sprintf(`["%s"]`, namespace.Name))
if err != nil {
return err
}
return nil
}
names := []string{}
err = json.Unmarshal([]byte(v), &names)
if err != nil {
err = h.setValue("namespaces_pending_updates", fmt.Sprintf(`["%s"]`, namespace.Name))
if err != nil {
return err
}
return nil
}
names = append(names, namespace.Name)
data, err := json.Marshal(names)
if err != nil {
log.Error().
Str("func", "RequestMapUpdates").
Err(err).
Msg("Could not marshal namespaces_pending_updates")
return err
}
return h.setValue("namespaces_pending_updates", string(data))
}
func (h *Headscale) checkForNamespacesPendingUpdates() {
v, err := h.getValue("namespaces_pending_updates")
if err != nil {
return
}
if v == "" {
return
}
names := []string{}
err = json.Unmarshal([]byte(v), &names)
if err != nil {
return
}
for _, name := range names {
log.Trace().
Str("func", "RequestMapUpdates").
Str("machine", name).
Msg("Sending updates to nodes in namespace")
machines, err := h.ListMachinesInNamespace(name)
if err != nil {
continue
}
for _, m := range *machines {
h.notifyChangesToPeers(&m)
}
}
newV, err := h.getValue("namespaces_pending_updates")
if err != nil {
return
}
if v == newV { // only clear when nothing changed, so we know everybody was notified
err = h.setValue("namespaces_pending_updates", "")
if err != nil {
log.Error().
Str("func", "checkForNamespacesPendingUpdates").
Err(err).
Msg("Could not save to KV")
return
}
}
}
func (n *Namespace) toUser() *tailcfg.User {
u := tailcfg.User{
ID: tailcfg.UserID(n.ID),
LoginName: "",
LoginName: n.Name,
DisplayName: n.Name,
ProfilePicURL: "",
Domain: "",
Domain: "headscale.net",
Logins: []tailcfg.LoginID{},
Roles: []tailcfg.RoleID{},
Created: time.Time{},
}
return &u
}
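
RequestMapUpdates and checkForNamespacesPendingUpdates above share one convention: the set of namespaces awaiting a map update is kept as a JSON-encoded string slice inside a single KV value. A rough, self-contained sketch of that append-or-initialize logic (the in-memory kv map is a stand-in for the real KV table):

package main

import (
	"encoding/json"
	"fmt"
)

var kv = map[string]string{} // stand-in for headscale's KV table

func requestMapUpdate(name string) error {
	names := []string{}
	// An empty or unparsable value resets the list to just this namespace,
	// matching the fallback behaviour in RequestMapUpdates.
	if v := kv["namespaces_pending_updates"]; v != "" {
		if err := json.Unmarshal([]byte(v), &names); err != nil {
			names = []string{}
		}
	}
	names = append(names, name)
	data, err := json.Marshal(names)
	if err != nil {
		return err
	}
	kv["namespaces_pending_updates"] = string(data)
	return nil
}

func main() {
	requestMapUpdate("shared1")
	requestMapUpdate("shared2")
	fmt.Println(kv["namespaces_pending_updates"]) // ["shared1","shared2"]
}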

View File

@@ -30,11 +30,6 @@ func (s *Suite) TestDestroyNamespaceErrors(c *check.C) {
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
m := Machine{
ID: 0,
MachineKey: "foo",
@@ -46,7 +41,7 @@ func (s *Suite) TestDestroyNamespaceErrors(c *check.C) {
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
db.Save(&m)
h.db.Save(&m)
err = h.DestroyNamespace("test")
c.Assert(err, check.Equals, errorNamespaceNotEmpty)

454
poll.go Normal file
View File

@@ -0,0 +1,454 @@
package headscale
import (
"encoding/json"
"errors"
"io"
"net/http"
"time"
"github.com/gin-gonic/gin"
"github.com/rs/zerolog/log"
"gorm.io/datatypes"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/wgkey"
)
// PollNetMapHandler takes care of /machine/:id/map
//
// This is the busiest endpoint, as it keeps the HTTP long poll that updates
// the clients when something in the network changes.
//
// The clients POST stuff like HostInfo and their Endpoints here, but
// only after their first request (marked with the ReadOnly field).
//
// At the moment the updates are sent in a rather crude way, but they work.
func (h *Headscale) PollNetMapHandler(c *gin.Context) {
log.Trace().
Str("handler", "PollNetMap").
Str("id", c.Param("id")).
Msg("PollNetMapHandler called")
body, _ := io.ReadAll(c.Request.Body)
mKeyStr := c.Param("id")
mKey, err := wgkey.ParseHex(mKeyStr)
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Err(err).
Msg("Cannot parse client key")
c.String(http.StatusBadRequest, "")
return
}
req := tailcfg.MapRequest{}
err = decode(body, &req, &mKey, h.privateKey)
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Err(err).
Msg("Cannot decode message")
c.String(http.StatusBadRequest, "")
return
}
var m Machine
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
log.Warn().
Str("handler", "PollNetMap").
Msgf("Ignoring request, cannot find machine with key %s", mKey.HexString())
c.String(http.StatusUnauthorized, "")
return
}
log.Trace().
Str("handler", "PollNetMap").
Str("id", c.Param("id")).
Str("machine", m.Name).
Msg("Found machine in database")
hostinfo, _ := json.Marshal(req.Hostinfo)
m.Name = req.Hostinfo.Hostname
m.HostInfo = datatypes.JSON(hostinfo)
m.DiscoKey = wgkey.Key(req.DiscoKey).HexString()
now := time.Now().UTC()
// From Tailscale client:
//
// ReadOnly is whether the client just wants to fetch the MapResponse,
// without updating their Endpoints. The Endpoints field will be ignored and
// LastSeen will not be updated and peers will not be notified of changes.
//
// The intended use is for clients to discover the DERP map at start-up
// before their first real endpoint update.
if !req.ReadOnly {
endpoints, _ := json.Marshal(req.Endpoints)
m.Endpoints = datatypes.JSON(endpoints)
m.LastSeen = &now
}
h.db.Save(&m)
data, err := h.getMapResponse(mKey, req, m)
if err != nil {
log.Error().
Str("handler", "PollNetMap").
Str("id", c.Param("id")).
Str("machine", m.Name).
Err(err).
Msg("Failed to get Map response")
c.String(http.StatusInternalServerError, ":(")
return
}
// We update our peers if the client is not sending ReadOnly in the MapRequest,
// so we don't distribute its initial request (which arrives with
// empty endpoints) to peers
// Details on the protocol can be found in https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L696
log.Debug().
Str("handler", "PollNetMap").
Str("id", c.Param("id")).
Str("machine", m.Name).
Bool("readOnly", req.ReadOnly).
Bool("omitPeers", req.OmitPeers).
Bool("stream", req.Stream).
Msg("Client map request processed")
if req.ReadOnly {
log.Info().
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Client is starting up. Probably interested in a DERP map")
c.Data(200, "application/json; charset=utf-8", *data)
return
}
// There has been an update to _any_ of the nodes that the other nodes would
// need to know about
h.setLastStateChangeToNow(m.Namespace.Name)
// The request is not ReadOnly, so we need to set up channels for updating
// peers via longpoll
// Only create update channel if it has not been created
log.Trace().
Str("handler", "PollNetMap").
Str("id", c.Param("id")).
Str("machine", m.Name).
Msg("Loading or creating update channel")
updateChan := h.getOrOpenUpdateChannel(&m)
pollDataChan := make(chan []byte)
// defer close(pollData)
keepAliveChan := make(chan []byte)
cancelKeepAlive := make(chan struct{})
defer close(cancelKeepAlive)
if req.OmitPeers && !req.Stream {
log.Info().
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Client sent endpoint update and is ok with a response without peer list")
c.Data(200, "application/json; charset=utf-8", *data)
// It sounds like we should update the nodes when we have received an endpoint update,
// even though the comments in the tailscale code don't explicitly say so.
go h.notifyChangesToPeers(&m)
return
} else if req.OmitPeers && req.Stream {
log.Warn().
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Ignoring request, don't know how to handle it")
c.String(http.StatusBadRequest, "")
return
}
log.Info().
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Client is ready to access the tailnet")
log.Info().
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Sending initial map")
go func() { pollDataChan <- *data }()
log.Info().
Str("handler", "PollNetMap").
Str("machine", m.Name).
Msg("Notifying peers")
go h.notifyChangesToPeers(&m)
h.PollNetMapStream(c, m, req, mKey, pollDataChan, keepAliveChan, updateChan, cancelKeepAlive)
log.Trace().
Str("handler", "PollNetMap").
Str("id", c.Param("id")).
Str("machine", m.Name).
Msg("Finished stream, closing PollNetMap session")
}
// PollNetMapStream takes care of /machine/:id/map
// stream logic, ensuring we communicate updates and data
// to the connected clients.
func (h *Headscale) PollNetMapStream(
c *gin.Context,
m Machine,
req tailcfg.MapRequest,
mKey wgkey.Key,
pollDataChan chan []byte,
keepAliveChan chan []byte,
updateChan <-chan struct{},
cancelKeepAlive chan struct{},
) {
go h.scheduledPollWorker(cancelKeepAlive, keepAliveChan, mKey, req, m)
c.Stream(func(w io.Writer) bool {
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Msg("Waiting for data to stream...")
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Msgf("pollData is %#v, keepAliveChan is %#v, updateChan is %#v", pollDataChan, keepAliveChan, updateChan)
select {
case data := <-pollDataChan:
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "pollData").
Int("bytes", len(data)).
Msg("Sending data received via pollData channel")
_, err := w.Write(data)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "pollData").
Err(err).
Msg("Cannot write data")
}
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "pollData").
Int("bytes", len(data)).
Msg("Data from pollData channel written successfully")
// TODO: Abstract away all the database calls; this can cause race conditions
// when an outdated machine object is kept alive, e.g. the db is updated from the
// command line but then overwritten.
err = h.UpdateMachine(&m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "pollData").
Err(err).
Msg("Cannot update machine from database")
}
now := time.Now().UTC()
m.LastSeen = &now
m.LastSuccessfulUpdate = &now
h.db.Save(&m)
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "pollData").
Int("bytes", len(data)).
Msg("Machine updated successfully after sending pollData")
return true
case data := <-keepAliveChan:
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "keepAlive").
Int("bytes", len(data)).
Msg("Sending keep alive message")
_, err := w.Write(data)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "keepAlive").
Err(err).
Msg("Cannot write keep alive message")
}
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "keepAlive").
Int("bytes", len(data)).
Msg("Keep alive sent successfully")
// TODO: Abstract away all the database calls; this can cause race conditions
// when an outdated machine object is kept alive, e.g. the db is updated from the
// command line but then overwritten.
err = h.UpdateMachine(&m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "keepAlive").
Err(err).
Msg("Cannot update machine from database")
}
now := time.Now().UTC()
m.LastSeen = &now
h.db.Save(&m)
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "keepAlive").
Int("bytes", len(data)).
Msg("Machine updated successfully after sending keep alive")
return true
case <-updateChan:
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "update").
Msg("Received a request for update")
if h.isOutdated(&m) {
log.Debug().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Time("last_successful_update", *m.LastSuccessfulUpdate).
Time("last_state_change", h.getLastStateChange(m.Namespace.Name)).
Msgf("There has been updates since the last successful update to %s", m.Name)
data, err := h.getMapResponse(mKey, req, m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "update").
Err(err).
Msg("Could not get the map update")
}
_, err = w.Write(*data)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "update").
Err(err).
Msg("Could not write the map response")
}
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "update").
Msg("Updated Map has been sent")
// Keep track of the last successful update;
// we sometimes end up in a state where the update
// is not picked up by a client, and we use this
// to determine if we should "force" an update.
// TODO: Abstract away all the database calls; this can cause race conditions
// when an outdated machine object is kept alive, e.g. the db is updated from the
// command line but then overwritten.
err = h.UpdateMachine(&m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "update").
Err(err).
Msg("Cannot update machine from database")
}
now := time.Now().UTC()
m.LastSuccessfulUpdate = &now
h.db.Save(&m)
} else {
log.Trace().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Time("last_successful_update", *m.LastSuccessfulUpdate).
Time("last_state_change", h.getLastStateChange(m.Namespace.Name)).
Msgf("%s is up to date", m.Name)
}
return true
case <-c.Request.Context().Done():
log.Info().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Msg("The client has closed the connection")
// TODO: Abstract away all the database calls; this can cause race conditions
// when an outdated machine object is kept alive, e.g. the db is updated from the
// command line but then overwritten.
err := h.UpdateMachine(&m)
if err != nil {
log.Error().
Str("handler", "PollNetMapStream").
Str("machine", m.Name).
Str("channel", "Done").
Err(err).
Msg("Cannot update machine from database")
}
now := time.Now().UTC()
m.LastSeen = &now
h.db.Save(&m)
cancelKeepAlive <- struct{}{}
h.closeUpdateChannel(&m)
close(pollDataChan)
close(keepAliveChan)
return false
}
})
}
func (h *Headscale) scheduledPollWorker(
cancelChan <-chan struct{},
keepAliveChan chan<- []byte,
mKey wgkey.Key,
req tailcfg.MapRequest,
m Machine,
) {
keepAliveTicker := time.NewTicker(60 * time.Second)
updateCheckerTicker := time.NewTicker(30 * time.Second)
for {
select {
case <-cancelChan:
return
case <-keepAliveTicker.C:
data, err := h.getMapKeepAliveResponse(mKey, req, m)
if err != nil {
log.Error().
Str("func", "keepAlive").
Err(err).
Msg("Error generating the keep alive msg")
return
}
log.Debug().
Str("func", "keepAlive").
Str("machine", m.Name).
Msg("Sending keepalive")
keepAliveChan <- *data
case <-updateCheckerTicker.C:
// Send an update request regardless of whether the node is outdated; whether data
// is actually sent to the node is determined in the updateChan consumer block
n, _ := m.toNode(true)
err := h.sendRequestOnUpdateChannel(n)
if err != nil {
log.Error().
Str("func", "keepAlive").
Str("machine", m.Name).
Err(err).
Msgf("Failed to send update request to %s", m.Name)
}
}
}
}
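
PollNetMapStream above reduces to a select loop over four event sources: map data, keep-alives, update signals, and client disconnect. A stripped-down, self-contained model of that loop (the channel names mirror the handler; the payloads and timings are made up):

package main

import (
	"fmt"
	"time"
)

// stream models the c.Stream select loop in PollNetMapStream.
func stream(pollDataChan, keepAliveChan chan []byte, updateChan chan struct{}, done <-chan struct{}) {
	for {
		select {
		case data := <-pollDataChan:
			fmt.Printf("wrote %d bytes of map data\n", len(data))
		case data := <-keepAliveChan:
			fmt.Printf("wrote %d bytes of keepalive\n", len(data))
		case <-updateChan:
			fmt.Println("update requested: would regenerate and send the map")
		case <-done:
			fmt.Println("client closed the connection")
			return
		}
	}
}

func main() {
	pollDataChan := make(chan []byte)
	keepAliveChan := make(chan []byte)
	updateChan := make(chan struct{})
	done := make(chan struct{})
	go stream(pollDataChan, keepAliveChan, updateChan, done)

	pollDataChan <- []byte("initial map")
	keepAliveChan <- []byte("{}")
	updateChan <- struct{}{}
	close(done)
	time.Sleep(100 * time.Millisecond) // let the goroutine print before exit
}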

View File

@@ -3,8 +3,10 @@ package headscale
import (
"crypto/rand"
"encoding/hex"
"log"
"errors"
"time"
"gorm.io/gorm"
)
const errorAuthKeyNotFound = Error("AuthKey not found")
@@ -31,13 +33,6 @@ func (h *Headscale) CreatePreAuthKey(namespaceName string, reusable bool, epheme
return nil, err
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
now := time.Now().UTC()
kstr, err := h.generateKey()
if err != nil {
@@ -53,7 +48,7 @@ func (h *Headscale) CreatePreAuthKey(namespaceName string, reusable bool, epheme
CreatedAt: &now,
Expiration: expiration,
}
db.Save(&k)
h.db.Save(&k)
return &k, nil
}
@@ -64,31 +59,41 @@ func (h *Headscale) GetPreAuthKeys(namespaceName string) (*[]PreAuthKey, error)
if err != nil {
return nil, err
}
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
defer db.Close()
keys := []PreAuthKey{}
if err := db.Preload("Namespace").Where(&PreAuthKey{NamespaceID: n.ID}).Find(&keys).Error; err != nil {
if err := h.db.Preload("Namespace").Where(&PreAuthKey{NamespaceID: n.ID}).Find(&keys).Error; err != nil {
return nil, err
}
return &keys, nil
}
// checkKeyValidity does the heavy lifting for validation of the PreAuthKey coming from a node
// If it returns no error and a PreAuthKey, the key can be used
func (h *Headscale) checkKeyValidity(k string) (*PreAuthKey, error) {
db, err := h.db()
// GetPreAuthKey returns a PreAuthKey for a given key
func (h *Headscale) GetPreAuthKey(namespace string, key string) (*PreAuthKey, error) {
pak, err := h.checkKeyValidity(key)
if err != nil {
return nil, err
}
defer db.Close()
if pak.Namespace.Name != namespace {
return nil, errors.New("Namespace mismatch")
}
return pak, nil
}
// MarkExpirePreAuthKey marks a PreAuthKey as expired
func (h *Headscale) MarkExpirePreAuthKey(k *PreAuthKey) error {
if err := h.db.Model(&k).Update("Expiration", time.Now()).Error; err != nil {
return err
}
return nil
}
// checkKeyValidity does the heavy lifting for validation of the PreAuthKey coming from a node
// If it returns no error and a PreAuthKey, the key can be used
func (h *Headscale) checkKeyValidity(k string) (*PreAuthKey, error) {
pak := PreAuthKey{}
if db.Preload("Namespace").First(&pak, "key = ?", k).RecordNotFound() {
if result := h.db.Preload("Namespace").First(&pak, "key = ?", k); errors.Is(result.Error, gorm.ErrRecordNotFound) {
return nil, errorAuthKeyNotFound
}
@@ -101,7 +106,7 @@ func (h *Headscale) checkKeyValidity(k string) (*PreAuthKey, error) {
}
machines := []Machine{}
if err := db.Preload("AuthKey").Where(&Machine{AuthKeyID: uint(pak.ID)}).Find(&machines).Error; err != nil {
if err := h.db.Preload("AuthKey").Where(&Machine{AuthKeyID: uint(pak.ID)}).Find(&machines).Error; err != nil {
return nil, err
}
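
MarkExpirePreAuthKey above works by setting Expiration to the current time, so validity checking reduces to a timestamp comparison. A hedged sketch of that check (the struct is trimmed to the one relevant field, not the full PreAuthKey model):

package main

import (
	"fmt"
	"time"
)

type preAuthKey struct {
	Expiration *time.Time // nil means the key never expires
}

// expired mirrors the expiry comparison done during key validation.
func expired(k preAuthKey) bool {
	return k.Expiration != nil && k.Expiration.Before(time.Now())
}

func main() {
	fresh := preAuthKey{}
	past := time.Now().Add(-time.Minute)
	stale := preAuthKey{Expiration: &past}
	fmt.Println(expired(fresh)) // false
	fmt.Println(expired(stale)) // true
}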

View File

@@ -73,11 +73,6 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
m := Machine{
ID: 0,
MachineKey: "foo",
@@ -89,7 +84,7 @@ func (*Suite) TestAlreadyUsedKey(c *check.C) {
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
db.Save(&m)
h.db.Save(&m)
p, err := h.checkKeyValidity(pak.Key)
c.Assert(err, check.Equals, errorAuthKeyNotReusableAlreadyUsed)
@@ -103,11 +98,6 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
pak, err := h.CreatePreAuthKey(n.Name, true, false, nil)
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
m := Machine{
ID: 1,
MachineKey: "foo",
@@ -119,7 +109,7 @@ func (*Suite) TestReusableBeingUsedKey(c *check.C) {
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
db.Save(&m)
h.db.Save(&m)
p, err := h.checkKeyValidity(pak.Key)
c.Assert(err, check.IsNil)
@@ -145,11 +135,6 @@ func (*Suite) TestEphemeralKey(c *check.C) {
pak, err := h.CreatePreAuthKey(n.Name, false, true, nil)
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
now := time.Now()
m := Machine{
ID: 0,
@@ -163,7 +148,7 @@ func (*Suite) TestEphemeralKey(c *check.C) {
LastSeen: &now,
AuthKeyID: uint(pak.ID),
}
db.Save(&m)
h.db.Save(&m)
_, err = h.checkKeyValidity(pak.Key)
// Ephemeral keys are by definition reusable
@@ -178,3 +163,20 @@ func (*Suite) TestEphemeralKey(c *check.C) {
_, err = h.GetMachine("test7", "testest")
c.Assert(err, check.NotNil)
}
func (*Suite) TestExpirePreauthKey(c *check.C) {
n, err := h.CreateNamespace("test3")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, true, false, nil)
c.Assert(err, check.IsNil)
c.Assert(pak.Expiration, check.IsNil)
err = h.MarkExpirePreAuthKey(pak)
c.Assert(err, check.IsNil)
c.Assert(pak.Expiration, check.NotNil)
p, err := h.checkKeyValidity(pak.Key)
c.Assert(err, check.Equals, errorAuthKeyExpired)
c.Assert(p, check.IsNil)
}

144
routes.go
View File

@@ -2,72 +2,142 @@ package headscale
import (
"encoding/json"
"errors"
"log"
"fmt"
"strconv"
"github.com/pterm/pterm"
"gorm.io/datatypes"
"inet.af/netaddr"
)
// GetNodeRoutes returns the subnet routes advertised by a node (identified by
// GetAdvertisedNodeRoutes returns the subnet routes advertised by a node (identified by
// namespace and node name)
func (h *Headscale) GetNodeRoutes(namespace string, nodeName string) (*[]netaddr.IPPrefix, error) {
func (h *Headscale) GetAdvertisedNodeRoutes(namespace string, nodeName string) (*[]netaddr.IPPrefix, error) {
m, err := h.GetMachine(namespace, nodeName)
if err != nil {
return nil, err
}
hi, err := m.GetHostInfo()
hostInfo, err := m.GetHostInfo()
if err != nil {
return nil, err
}
return &hi.RoutableIPs, nil
return &hostInfo.RoutableIPs, nil
}
// GetEnabledNodeRoutes returns the subnet routes enabled by a node (identified by
// namespace and node name)
func (h *Headscale) GetEnabledNodeRoutes(namespace string, nodeName string) ([]netaddr.IPPrefix, error) {
m, err := h.GetMachine(namespace, nodeName)
if err != nil {
return nil, err
}
data, err := m.EnabledRoutes.MarshalJSON()
if err != nil {
return nil, err
}
routesStr := []string{}
err = json.Unmarshal(data, &routesStr)
if err != nil {
return nil, err
}
routes := make([]netaddr.IPPrefix, len(routesStr))
for index, routeStr := range routesStr {
route, err := netaddr.ParseIPPrefix(routeStr)
if err != nil {
return nil, err
}
routes[index] = route
}
return routes, nil
}
// IsNodeRouteEnabled checks if a certain route has been enabled
func (h *Headscale) IsNodeRouteEnabled(namespace string, nodeName string, routeStr string) bool {
route, err := netaddr.ParseIPPrefix(routeStr)
if err != nil {
return false
}
enabledRoutes, err := h.GetEnabledNodeRoutes(namespace, nodeName)
if err != nil {
return false
}
for _, enabledRoute := range enabledRoutes {
if route == enabledRoute {
return true
}
}
return false
}
// EnableNodeRoute enables a subnet route advertised by a node (identified by
// namespace and node name)
func (h *Headscale) EnableNodeRoute(namespace string, nodeName string, routeStr string) (*netaddr.IPPrefix, error) {
func (h *Headscale) EnableNodeRoute(namespace string, nodeName string, routeStr string) error {
m, err := h.GetMachine(namespace, nodeName)
if err != nil {
return nil, err
}
hi, err := m.GetHostInfo()
if err != nil {
return nil, err
return err
}
route, err := netaddr.ParseIPPrefix(routeStr)
if err != nil {
return nil, err
return err
}
for _, rIP := range hi.RoutableIPs {
if rIP == route {
db, err := h.db()
if err != nil {
log.Printf("Cannot open DB: %s", err)
return nil, err
}
availableRoutes, err := h.GetAdvertisedNodeRoutes(namespace, nodeName)
if err != nil {
return err
}
routes, _ := json.Marshal([]string{routeStr}) // TODO: only one for the time being, so overwriting the rest
m.EnabledRoutes = datatypes.JSON(routes)
db.Save(&m)
db.Close()
enabledRoutes, err := h.GetEnabledNodeRoutes(namespace, nodeName)
if err != nil {
return err
}
// THIS IS COMPLETELY USELESS.
// The peers map is stored in memory in the server process.
// Definitely not accessible from the CLI tool.
// We need RPC to the server - or some kind of 'needsUpdate' field in the DB
peers, _ := h.getPeers(*m)
h.pollMu.Lock()
for _, p := range *peers {
if pUp, ok := h.clientsPolling[uint64(p.ID)]; ok {
pUp <- []byte{}
}
available := false
for _, availableRoute := range *availableRoutes {
// If the route is available, and not yet enabled, add it to the new routing table
if route == availableRoute {
available = true
if !h.IsNodeRouteEnabled(namespace, nodeName, routeStr) {
enabledRoutes = append(enabledRoutes, route)
}
h.pollMu.Unlock()
return &rIP, nil
}
}
return nil, errors.New("could not find routable range")
if !available {
return fmt.Errorf("route (%s) is not available on node %s", nodeName, routeStr)
}
routes, err := json.Marshal(enabledRoutes)
if err != nil {
return err
}
m.EnabledRoutes = datatypes.JSON(routes)
h.db.Save(&m)
err = h.RequestMapUpdates(m.NamespaceID)
if err != nil {
return err
}
return nil
}
// RoutesToPtables converts the list of routes to a nice table
func (h *Headscale) RoutesToPtables(namespace string, nodeName string, availableRoutes []netaddr.IPPrefix) pterm.TableData {
d := pterm.TableData{{"Route", "Enabled"}}
for _, route := range availableRoutes {
enabled := h.IsNodeRouteEnabled(namespace, nodeName, route.String())
d = append(d, []string{route.String(), strconv.FormatBool(enabled)})
}
return d
}
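
GetEnabledNodeRoutes above stores the enabled routes as a JSON array of CIDR strings and parses them back with inet.af/netaddr. A self-contained sketch of that round-trip, using the same package the file imports (the sample routes are made up):

package main

import (
	"encoding/json"
	"fmt"

	"inet.af/netaddr"
)

func main() {
	// What ends up in a machine's EnabledRoutes column.
	stored := []byte(`["10.0.0.0/24","150.0.10.0/25"]`)

	routesStr := []string{}
	if err := json.Unmarshal(stored, &routesStr); err != nil {
		panic(err)
	}

	routes := make([]netaddr.IPPrefix, len(routesStr))
	for i, routeStr := range routesStr {
		route, err := netaddr.ParseIPPrefix(routeStr)
		if err != nil {
			panic(err)
		}
		routes[i] = route
	}
	fmt.Println(routes) // [10.0.0.0/24 150.0.10.0/25]
}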

View File

@@ -16,13 +16,7 @@ func (s *Suite) TestGetRoutes(c *check.C) {
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
db, err := h.db()
if err != nil {
c.Fatal(err)
}
defer db.Close()
_, err = h.GetMachine("test", "testmachine")
_, err = h.GetMachine("test", "test_get_route_machine")
c.Assert(err, check.NotNil)
route, err := netaddr.ParseIPPrefix("10.0.0.0/24")
@@ -39,23 +33,96 @@ func (s *Suite) TestGetRoutes(c *check.C) {
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
Name: "test_get_route_machine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
HostInfo: datatypes.JSON(hostinfo),
}
db.Save(&m)
h.db.Save(&m)
r, err := h.GetNodeRoutes("test", "testmachine")
r, err := h.GetAdvertisedNodeRoutes("test", "test_get_route_machine")
c.Assert(err, check.IsNil)
c.Assert(len(*r), check.Equals, 1)
_, err = h.EnableNodeRoute("test", "testmachine", "192.168.0.0/24")
err = h.EnableNodeRoute("test", "test_get_route_machine", "192.168.0.0/24")
c.Assert(err, check.NotNil)
_, err = h.EnableNodeRoute("test", "testmachine", "10.0.0.0/24")
err = h.EnableNodeRoute("test", "test_get_route_machine", "10.0.0.0/24")
c.Assert(err, check.IsNil)
}
func (s *Suite) TestGetEnableRoutes(c *check.C) {
n, err := h.CreateNamespace("test")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine("test", "test_enable_route_machine")
c.Assert(err, check.NotNil)
route, err := netaddr.ParseIPPrefix(
"10.0.0.0/24",
)
c.Assert(err, check.IsNil)
route2, err := netaddr.ParseIPPrefix(
"150.0.10.0/25",
)
c.Assert(err, check.IsNil)
hi := tailcfg.Hostinfo{
RoutableIPs: []netaddr.IPPrefix{route, route2},
}
hostinfo, err := json.Marshal(hi)
c.Assert(err, check.IsNil)
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "test_enable_route_machine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
HostInfo: datatypes.JSON(hostinfo),
}
h.db.Save(&m)
availableRoutes, err := h.GetAdvertisedNodeRoutes("test", "test_enable_route_machine")
c.Assert(err, check.IsNil)
c.Assert(len(*availableRoutes), check.Equals, 2)
enabledRoutes, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes), check.Equals, 0)
err = h.EnableNodeRoute("test", "test_enable_route_machine", "192.168.0.0/24")
c.Assert(err, check.NotNil)
err = h.EnableNodeRoute("test", "test_enable_route_machine", "10.0.0.0/24")
c.Assert(err, check.IsNil)
enabledRoutes1, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 1)
// Adding it twice will just let it pass through
err = h.EnableNodeRoute("test", "test_enable_route_machine", "10.0.0.0/24")
c.Assert(err, check.IsNil)
enabledRoutes2, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes2), check.Equals, 1)
err = h.EnableNodeRoute("test", "test_enable_route_machine", "150.0.10.0/25")
c.Assert(err, check.IsNil)
enabledRoutes3, err := h.GetEnabledNodeRoutes("test", "test_enable_route_machine")
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes3), check.Equals, 2)
}

View File

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
set -e -o pipefail
commit="$1"

37
sharing.go Normal file
View File

@@ -0,0 +1,37 @@
package headscale
import "gorm.io/gorm"
const errorSameNamespace = Error("Destination namespace same as origin")
const errorMachineAlreadyShared = Error("Node already shared to this namespace")
// SharedMachine is a join table to support sharing nodes between namespaces
type SharedMachine struct {
gorm.Model
MachineID uint64
Machine Machine
NamespaceID uint
Namespace Namespace
}
// AddSharedMachineToNamespace adds a machine as a shared node to a namespace
func (h *Headscale) AddSharedMachineToNamespace(m *Machine, ns *Namespace) error {
if m.NamespaceID == ns.ID {
return errorSameNamespace
}
sharedMachine := SharedMachine{}
if err := h.db.Where("machine_id = ? AND namespace_id", m.ID, ns.ID).First(&sharedMachine).Error; err == nil {
return errorMachineAlreadyShared
}
sharedMachine = SharedMachine{
MachineID: m.ID,
Machine: *m,
NamespaceID: ns.ID,
Namespace: *ns,
}
h.db.Save(&sharedMachine)
return nil
}
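
SharedMachine above is a plain GORM join table: sharing a node into a namespace is a single inserted row, and getPeers later joins through it with Preload. A rough sketch of the same shape against an in-memory SQLite database (the models are simplified, not headscale's real schema):

package main

import (
	"fmt"

	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

type Machine struct {
	ID   uint64
	Name string
}

type SharedMachine struct {
	gorm.Model
	MachineID   uint64
	Machine     Machine
	NamespaceID uint
}

func main() {
	db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&Machine{}, &SharedMachine{}); err != nil {
		panic(err)
	}

	m := Machine{ID: 1, Name: "test_get_shared_nodes_2"}
	db.Create(&m)
	// Sharing the node into namespace 1 is one row in the join table.
	db.Create(&SharedMachine{MachineID: m.ID, NamespaceID: 1})

	shared := []SharedMachine{}
	db.Preload("Machine").Where("namespace_id = ?", 1).Find(&shared)
	fmt.Println(shared[0].Machine.Name) // test_get_shared_nodes_2
}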

359
sharing_test.go Normal file
View File

@@ -0,0 +1,359 @@
package headscale
import (
"gopkg.in/check.v1"
"tailscale.com/tailcfg"
)
func (s *Suite) TestBasicSharedNodesInNamespace(c *check.C) {
n1, err := h.CreateNamespace("shared1")
c.Assert(err, check.IsNil)
n2, err := h.CreateNamespace("shared2")
c.Assert(err, check.IsNil)
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
m1 := Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
Name: "test_get_shared_nodes_1",
NamespaceID: n1.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
h.db.Save(&m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
m2 := Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_2",
NamespaceID: n2.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
h.db.Save(&m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1s), check.Equals, 0)
err = h.AddSharedMachineToNamespace(&m2, n1)
c.Assert(err, check.IsNil)
p1sAfter, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1sAfter), check.Equals, 1)
c.Assert((*p1sAfter)[0].ID, check.Equals, tailcfg.NodeID(m2.ID))
}
func (s *Suite) TestSameNamespace(c *check.C) {
n1, err := h.CreateNamespace("shared1")
c.Assert(err, check.IsNil)
n2, err := h.CreateNamespace("shared2")
c.Assert(err, check.IsNil)
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
m1 := Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
Name: "test_get_shared_nodes_1",
NamespaceID: n1.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
h.db.Save(&m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
m2 := Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_2",
NamespaceID: n2.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
h.db.Save(&m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1s), check.Equals, 0)
err = h.AddSharedMachineToNamespace(&m1, n1)
c.Assert(err, check.Equals, errorSameNamespace)
}
func (s *Suite) TestAlreadyShared(c *check.C) {
n1, err := h.CreateNamespace("shared1")
c.Assert(err, check.IsNil)
n2, err := h.CreateNamespace("shared2")
c.Assert(err, check.IsNil)
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
m1 := Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
Name: "test_get_shared_nodes_1",
NamespaceID: n1.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
h.db.Save(&m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
m2 := Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_2",
NamespaceID: n2.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
h.db.Save(&m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1s), check.Equals, 0)
err = h.AddSharedMachineToNamespace(&m2, n1)
c.Assert(err, check.IsNil)
err = h.AddSharedMachineToNamespace(&m2, n1)
c.Assert(err, check.Equals, errorMachineAlreadyShared)
}
func (s *Suite) TestDoNotIncludeRoutesOnShared(c *check.C) {
n1, err := h.CreateNamespace("shared1")
c.Assert(err, check.IsNil)
n2, err := h.CreateNamespace("shared2")
c.Assert(err, check.IsNil)
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
m1 := Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
Name: "test_get_shared_nodes_1",
NamespaceID: n1.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
h.db.Save(&m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
m2 := Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_2",
NamespaceID: n2.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
h.db.Save(&m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1s), check.Equals, 0)
err = h.AddSharedMachineToNamespace(&m2, n1)
c.Assert(err, check.IsNil)
p1sAfter, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1sAfter), check.Equals, 1)
c.Assert(len((*p1sAfter)[0].AllowedIPs), check.Equals, 1)
}
func (s *Suite) TestComplexSharingAcrossNamespaces(c *check.C) {
n1, err := h.CreateNamespace("shared1")
c.Assert(err, check.IsNil)
n2, err := h.CreateNamespace("shared2")
c.Assert(err, check.IsNil)
n3, err := h.CreateNamespace("shared3")
c.Assert(err, check.IsNil)
pak1, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak2, err := h.CreatePreAuthKey(n2.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak3, err := h.CreatePreAuthKey(n3.Name, false, false, nil)
c.Assert(err, check.IsNil)
pak4, err := h.CreatePreAuthKey(n1.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine(n1.Name, "test_get_shared_nodes_1")
c.Assert(err, check.NotNil)
m1 := Machine{
ID: 0,
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
Name: "test_get_shared_nodes_1",
NamespaceID: n1.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.1",
AuthKeyID: uint(pak1.ID),
}
h.db.Save(&m1)
_, err = h.GetMachine(n1.Name, m1.Name)
c.Assert(err, check.IsNil)
m2 := Machine{
ID: 1,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_2",
NamespaceID: n2.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.2",
AuthKeyID: uint(pak2.ID),
}
h.db.Save(&m2)
_, err = h.GetMachine(n2.Name, m2.Name)
c.Assert(err, check.IsNil)
m3 := Machine{
ID: 2,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_3",
NamespaceID: n3.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.3",
AuthKeyID: uint(pak3.ID),
}
h.db.Save(&m3)
_, err = h.GetMachine(n3.Name, m3.Name)
c.Assert(err, check.IsNil)
m4 := Machine{
ID: 3,
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
Name: "test_get_shared_nodes_4",
NamespaceID: n1.ID,
Registered: true,
RegisterMethod: "authKey",
IPAddress: "100.64.0.4",
AuthKeyID: uint(pak4.ID),
}
h.db.Save(&m4)
_, err = h.GetMachine(n1.Name, m4.Name)
c.Assert(err, check.IsNil)
p1s, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1s), check.Equals, 1) // nodes 1 and 4
err = h.AddSharedMachineToNamespace(&m2, n1)
c.Assert(err, check.IsNil)
p1sAfter, err := h.getPeers(m1)
c.Assert(err, check.IsNil)
c.Assert(len(*p1sAfter), check.Equals, 2) // nodes 1, 2, 4
pAlone, err := h.getPeers(m3)
c.Assert(err, check.IsNil)
c.Assert(len(*pAlone), check.Equals, 0) // node 3 is alone
}

View File

@@ -0,0 +1,127 @@
{
// Declare static groups of users beyond those in the identity service.
"Groups": {
"group:example": [
"user1@example.com",
"user2@example.com",
],
"group:example2": [
"user1@example.com",
"user2@example.com",
],
},
// Declare hostname aliases to use in place of IP addresses or subnets.
"Hosts": {
"example-host-1": "100.100.100.100",
"example-host-2": "100.100.101.100/24",
},
// Define who is allowed to use which tags.
"TagOwners": {
// Everyone in the montreal-admins or global-admins group is
// allowed to tag servers as montreal-webserver.
"tag:montreal-webserver": [
"group:example",
],
// Only a few admins are allowed to create API servers.
"tag:production": [
"group:example",
"president@example.com",
],
},
// Access control lists.
"ACLs": [
// Engineering users, plus the president, can access port 22 (ssh)
// and port 3389 (remote desktop protocol) on all servers, and all
// ports on git-server or ci-server.
{
"Action": "accept",
"Users": [
"group:example2",
"192.168.1.0/24"
],
"Ports": [
"*:22,3389",
"git-server:*",
"ci-server:*"
],
},
// Allow engineer users to access any port on a device tagged with
// tag:production.
{
"Action": "accept",
"Users": [
"group:example"
],
"Ports": [
"tag:production:*"
],
},
// Allow servers in the my-subnet host and 192.168.1.0/24 to access hosts
// on both networks.
{
"Action": "accept",
"Users": [
"example-host-2",
],
"Ports": [
"example-host-1:*",
"192.168.1.0/24:*"
],
},
// Allow every user of your network to access anything on the network.
// Comment out this section if you want to define specific ACL
// restrictions above.
{
"Action": "accept",
"Users": [
"*"
],
"Ports": [
"*:*"
],
},
// All users in Montreal are allowed to access the Montreal web
// servers.
{
"Action": "accept",
"Users": [
"example-host-1"
],
"Ports": [
"tag:montreal-webserver:80,443"
],
},
// Montreal web servers are allowed to make outgoing connections to
// the API servers, but only on https port 443.
// In contrast, this doesn't grant API servers the right to initiate
// any connections.
{
"Action": "accept",
"Users": [
"tag:montreal-webserver"
],
"Ports": [
"tag:api-server:443"
],
},
],
// Declare tests to check functionality of ACL rules
"Tests": [
{
"User": "user1@example.com",
"Allow": [
"example-host-1:22",
"example-host-2:80"
],
"Deny": [
"exapmle-host-2:100"
],
},
{
"User": "user2@example.com",
"Allow": [
"100.60.3.4:22"
],
},
],
}

View File

@@ -0,0 +1,24 @@
// This ACL is a very basic example to validate the
// expansion of hosts
{
"Hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"ACLs": [
{
"Action": "accept",
"Users": [
"subnet-1",
"192.168.1.0/24"
],
"Ports": [
"*:22,3389",
"host-1:*",
],
},
],
}

View File

@@ -0,0 +1,26 @@
// This ACL is used to test group expansion
{
"Groups": {
"group:example": [
"testnamespace",
],
},
"Hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"ACLs": [
{
"Action": "accept",
"Users": [
"group:example",
],
"Ports": [
"host-1:*",
],
},
],
}

View File

@@ -0,0 +1,20 @@
// This ACL is used to test namespace expansion
{
"Hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"ACLs": [
{
"Action": "accept",
"Users": [
"testnamespace",
],
"Ports": [
"host-1:*",
],
},
],
}

View File

@@ -0,0 +1,20 @@
// This ACL is used to test the port range expansion
{
"Hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"ACLs": [
{
"Action": "accept",
"Users": [
"subnet-1",
],
"Ports": [
"host-1:5400-5500",
],
},
],
}

View File

@@ -0,0 +1,20 @@
// This ACL is used to test wildcards
{
"Hosts": {
"host-1": "100.100.100.100",
"subnet-1": "100.100.101.100/24",
},
"ACLs": [
{
"Action": "accept",
"Users": [
"*",
],
"Ports": [
"host-1:*",
],
},
],
}

View File

@@ -0,0 +1,125 @@
{
// Declare static groups of users beyond those in the identity service.
"Groups": {
"group:example": [
"user1@example.com",
"user2@example.com",
],
},
// Declare hostname aliases to use in place of IP addresses or subnets.
"Hosts": {
"example-host-1": "100.100.100.100",
"example-host-2": "100.100.101.100/24",
},
// Define who is allowed to use which tags.
"TagOwners": {
// Everyone in the montreal-admins or global-admins group is
// allowed to tag servers as montreal-webserver.
"tag:montreal-webserver": [
"group:montreal-admins",
"group:global-admins",
],
// Only a few admins are allowed to create API servers.
"tag:api-server": [
"group:global-admins",
"example-host-1",
],
},
// Access control lists.
"ACLs": [
// Engineering users, plus the president, can access port 22 (ssh)
// and port 3389 (remote desktop protocol) on all servers, and all
// ports on git-server or ci-server.
{
"Action": "accept",
"Users": [
"group:engineering",
"president@example.com"
],
"Ports": [
"*:22,3389",
"git-server:*",
"ci-server:*"
],
},
// Allow engineering users to access any port on a device tagged with
// tag:production.
{
"Action": "accept",
"Users": [
"group:engineers"
],
"Ports": [
"tag:production:*"
],
},
// Allow hosts in my-subnet and in 192.168.1.0/24 to access hosts
// on both networks.
{
"Action": "accept",
"Users": [
"my-subnet",
"192.168.1.0/24"
],
"Ports": [
"my-subnet:*",
"192.168.1.0/24:*"
],
},
// Allow every user of your network to access anything on the network.
// Comment out this section if you want to define specific ACL
// restrictions above.
{
"Action": "accept",
"Users": [
"*"
],
"Ports": [
"*:*"
],
},
// All users in Montreal are allowed to access the Montreal web
// servers.
{
"Action": "accept",
"Users": [
"group:montreal-users"
],
"Ports": [
"tag:montreal-webserver:80,443"
],
},
// Montreal web servers are allowed to make outgoing connections to
// the API servers, but only on https port 443.
// In contrast, this doesn't grant API servers the right to initiate
// any connections.
{
"Action": "accept",
"Users": [
"tag:montreal-webserver"
],
"Ports": [
"tag:api-server:443"
],
},
],
// Declare tests to check functionality of ACL rules
"Tests": [
{
"User": "user1@example.com",
"Allow": [
"example-host-1:22",
"example-host-2:80"
],
"Deny": [
"exapmle-host-2:100"
],
},
{
"User": "user2@example.com",
"Allow": [
"100.60.3.4:22"
],
},
],
}
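
Taken together, the sections above (Groups, Hosts, TagOwners, ACLs, Tests) map naturally onto a small Go data model. The sketch below mirrors the HuJSON keys one-to-one; the field types are assumptions for illustration and may differ from headscale's internal types:

package headscale

// ACLPolicy is a plausible Go shape for the policy files above; the
// field names mirror the HuJSON keys.
type ACLPolicy struct {
    Groups    map[string][]string `json:"Groups"`
    Hosts     map[string]string   `json:"Hosts"`
    TagOwners map[string][]string `json:"TagOwners"`
    ACLs      []ACL               `json:"ACLs"`
    Tests     []ACLTest           `json:"Tests"`
}

// ACL is one accept rule: who (Users) may reach what (Ports).
type ACL struct {
    Action string   `json:"Action"`
    Users  []string `json:"Users"`
    Ports  []string `json:"Ports"`
}

// ACLTest asserts that a user can (or cannot) reach a destination,
// mirroring the "Tests" section.
type ACLTest struct {
    User  string   `json:"User"`
    Allow []string `json:"Allow"`
    Deny  []string `json:"Deny,omitempty"`
}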

1
tests/acls/broken.hujson Normal file

@@ -0,0 +1 @@
{


@@ -0,0 +1,4 @@
{
"valid_json": true,
"but_a_policy_though": false
}
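
These two fixtures pin down different failure modes: broken.hujson must fail at the parse step, while the file above is valid JSON that should still be rejected as a policy. A hedged sketch of such a parse-then-validate loader, reusing the ACLPolicy sketch above and assuming the json-style Unmarshal helper from github.com/tailscale/hujson that headscale used at the time:

package headscale

import (
    "errors"
    "os"

    "github.com/tailscale/hujson"
)

var errEmptyPolicy = errors.New("acl policy is empty or invalid")

// loadACLPolicy parses, then validates: broken.hujson fails at
// hujson.Unmarshal, while syntactically valid JSON that carries no
// policy sections is caught by the emptiness check afterwards.
func loadACLPolicy(path string) (*ACLPolicy, error) {
    b, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }

    var policy ACLPolicy
    // hujson.Unmarshal is assumed to follow the encoding/json
    // signature, accepting comments and trailing commas.
    if err := hujson.Unmarshal(b, &policy); err != nil {
        return nil, err // e.g. tests/acls/broken.hujson
    }

    // Valid JSON is not necessarily a valid policy: require at least
    // one meaningful section.
    if len(policy.ACLs) == 0 && len(policy.Groups) == 0 &&
        len(policy.Hosts) == 0 && len(policy.TagOwners) == 0 {
        return nil, errEmptyPolicy
    }

    return &policy, nil
}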

125
utils.go

@@ -7,18 +7,15 @@ package headscale
 import (
     "crypto/rand"
-    "encoding/binary"
     "encoding/json"
-    "errors"
     "fmt"
     "io"
-    "net"
-    "time"
-
-    mathrand "math/rand"
+    "strings"

     "golang.org/x/crypto/nacl/box"
-    "tailscale.com/wgengine/wgcfg"
+    "inet.af/netaddr"
+    "tailscale.com/tailcfg"
+    "tailscale.com/types/wgkey"
 )

 // Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
@@ -26,11 +23,11 @@ type Error string
 func (e Error) Error() string { return string(e) }

-func decode(msg []byte, v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) error {
+func decode(msg []byte, v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) error {
     return decodeMsg(msg, v, pubKey, privKey)
 }

-func decodeMsg(msg []byte, v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) error {
+func decodeMsg(msg []byte, v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) error {
     decrypted, err := decryptMsg(msg, pubKey, privKey)
     if err != nil {
         return err
@@ -42,7 +39,7 @@ func decodeMsg(msg []byte, v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.Priv
     return nil
 }

-func decryptMsg(msg []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte, error) {
+func decryptMsg(msg []byte, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, error) {
     var nonce [24]byte
     if len(msg) < len(nonce)+1 {
         return nil, fmt.Errorf("response missing nonce, len=%d", len(msg))
@@ -58,15 +55,16 @@ func decryptMsg(msg []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byt
     return decrypted, nil
 }

-func encode(v interface{}, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte, error) {
+func encode(v interface{}, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, error) {
     b, err := json.Marshal(v)
     if err != nil {
         return nil, err
     }
     return encodeMsg(b, pubKey, privKey)
 }

-func encodeMsg(b []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte, error) {
+func encodeMsg(b []byte, pubKey *wgkey.Key, privKey *wgkey.Private) ([]byte, error) {
     var nonce [24]byte
     if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
         panic(err)
@@ -76,52 +74,85 @@ func encodeMsg(b []byte, pubKey *wgcfg.Key, privKey *wgcfg.PrivateKey) ([]byte,
     return msg, nil
 }

-func (h *Headscale) getAvailableIP() (*net.IP, error) {
-    db, err := h.db()
+func (h *Headscale) getAvailableIP() (*netaddr.IP, error) {
+    ipPrefix := h.cfg.IPPrefix
+
+    usedIps, err := h.getUsedIPs()
     if err != nil {
         return nil, err
     }
-    defer db.Close()
-    i := 0
+
+    // Get the first IP in our prefix
+    ip := ipPrefix.IP()
+
     for {
-        ip, err := getRandomIP()
-        if err != nil {
-            return nil, err
+        if !ipPrefix.Contains(ip) {
+            return nil, fmt.Errorf("could not find any suitable IP in %s", ipPrefix)
         }
-        m := Machine{}
-        if db.First(&m, "ip_address = ?", ip.String()).RecordNotFound() {
-            return ip, nil
+
+        // Some OSes (including Linux) do not like IPs that end in 0 or 255,
+        // which are traditionally the network and broadcast addresses; skip
+        // them and keep looking whenever we hit one.
+        ipRaw := ip.As4()
+        if ipRaw[3] == 0 || ipRaw[3] == 255 {
+            ip = ip.Next()
+            continue
         }
-        i++
-        if i == 100 { // really random number
-            break
+
+        if ip.IsZero() || ip.IsLoopback() {
+            ip = ip.Next()
+            continue
         }
+
+        if !containsIPs(usedIps, ip) {
+            return &ip, nil
+        }
+
+        ip = ip.Next()
     }
-    return nil, errors.New("Could not find an available IP address in 100.64.0.0/10")
 }

-func getRandomIP() (*net.IP, error) {
-    mathrand.Seed(time.Now().Unix())
-    ipo, ipnet, err := net.ParseCIDR("100.64.0.0/10")
-    if err == nil {
-        ip := ipo.To4()
-        // fmt.Println("In Randomize IPAddr: IP ", ip, " IPNET: ", ipnet)
-        // fmt.Println("Final address is ", ip)
-        // fmt.Println("Broadcast address is ", ipb)
-        // fmt.Println("Network address is ", ipn)
-        r := mathrand.Uint32()
-        ipRaw := make([]byte, 4)
-        binary.LittleEndian.PutUint32(ipRaw, r)
-        // ipRaw[3] = 254
-        // fmt.Println("ipRaw is ", ipRaw)
-        for i, v := range ipRaw {
-            // fmt.Println("IP Before: ", ip[i], " v is ", v, " Mask is: ", ipnet.Mask[i])
-            ip[i] = ip[i] + (v &^ ipnet.Mask[i])
-            // fmt.Println("IP After: ", ip[i])
-        }
-        // fmt.Println("FINAL IP: ", ip.String())
-        return &ip, nil
-    }
-    return nil, err
-}
+func (h *Headscale) getUsedIPs() ([]netaddr.IP, error) {
+    var addresses []string
+    h.db.Model(&Machine{}).Pluck("ip_address", &addresses)
+
+    ips := make([]netaddr.IP, len(addresses))
+    for index, addr := range addresses {
+        if addr != "" {
+            ip, err := netaddr.ParseIP(addr)
+            if err != nil {
+                return nil, fmt.Errorf("failed to parse ip from database, %w", err)
+            }
+
+            ips[index] = ip
+        }
+    }
+
+    return ips, nil
+}
+
+func containsIPs(ips []netaddr.IP, ip netaddr.IP) bool {
+    for _, v := range ips {
+        if v == ip {
+            return true
+        }
+    }
+
+    return false
+}
+
+func tailNodesToString(nodes []*tailcfg.Node) string {
+    temp := make([]string, len(nodes))
+
+    for index, node := range nodes {
+        temp[index] = node.Name
+    }
+
+    return fmt.Sprintf("[ %s ](%d)", strings.Join(temp, ", "), len(temp))
+}
+
+func tailMapResponseToString(resp tailcfg.MapResponse) string {
+    return fmt.Sprintf("{ Node: %s, Peers: %s }", resp.Node.Name, tailNodesToString(resp.Peers))
+}
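
The rewritten getAvailableIP drops random allocation in 100.64.0.0/10 in favour of a deterministic walk from the bottom of the configured prefix. The standalone sketch below shows the same strategy with inet.af/netaddr; the prefix and the used set are invented here for illustration (headscale takes them from its config and database), and an IPv4 prefix is assumed since As4 would panic on IPv6:

package main

import (
    "fmt"

    "inet.af/netaddr"
)

// nextFreeIP walks sequentially from the bottom of prefix, skipping
// x.x.x.0 and x.x.x.255 plus the zero and loopback addresses, and
// returns the first IP not present in used, mirroring the strategy of
// the new getAvailableIP above.
func nextFreeIP(prefix netaddr.IPPrefix, used map[netaddr.IP]bool) (netaddr.IP, error) {
    ip := prefix.IP()
    for prefix.Contains(ip) {
        raw := ip.As4() // IPv4 assumed
        if raw[3] == 0 || raw[3] == 255 || ip.IsZero() || ip.IsLoopback() {
            ip = ip.Next() // reserved-looking address, keep walking
            continue
        }
        if !used[ip] {
            return ip, nil
        }
        ip = ip.Next()
    }
    return netaddr.IP{}, fmt.Errorf("no free IP in %s", prefix)
}

func main() {
    prefix := netaddr.MustParseIPPrefix("10.27.0.0/23")
    used := map[netaddr.IP]bool{
        netaddr.MustParseIP("10.27.0.1"): true,
        netaddr.MustParseIP("10.27.0.2"): true,
    }
    ip, err := nextFreeIP(prefix, used)
    if err != nil {
        panic(err)
    }
    fmt.Println(ip) // 10.27.0.3
}

Because nothing is reserved at lookup time, two consecutive calls return the same address until a machine is actually saved, which is exactly what the new tests below assert.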

155
utils_test.go Normal file

@@ -0,0 +1,155 @@
package headscale
import (
"gopkg.in/check.v1"
"inet.af/netaddr"
)
func (s *Suite) TestGetAvailableIp(c *check.C) {
ip, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
expected := netaddr.MustParseIP("10.27.0.1")
c.Assert(ip.String(), check.Equals, expected.String())
}
func (s *Suite) TestGetUsedIps(c *check.C) {
ip, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
n, err := h.CreateNamespace("test_ip")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine("test", "testmachine")
c.Assert(err, check.NotNil)
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
IPAddress: ip.String(),
}
h.db.Save(&m)
ips, err := h.getUsedIPs()
c.Assert(err, check.IsNil)
expected := netaddr.MustParseIP("10.27.0.1")
c.Assert(ips[0], check.Equals, expected)
m1, err := h.GetMachineByID(0)
c.Assert(err, check.IsNil)
c.Assert(m1.IPAddress, check.Equals, expected.String())
}
func (s *Suite) TestGetMultiIp(c *check.C) {
n, err := h.CreateNamespace("test-ip-multi")
c.Assert(err, check.IsNil)
for i := 1; i <= 350; i++ {
ip, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine("test", "testmachine")
c.Assert(err, check.NotNil)
m := Machine{
ID: uint64(i),
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
IPAddress: ip.String(),
}
h.db.Save(&m)
}
ips, err := h.getUsedIPs()
c.Assert(err, check.IsNil)
c.Assert(len(ips), check.Equals, 350)
c.Assert(ips[0], check.Equals, netaddr.MustParseIP("10.27.0.1"))
c.Assert(ips[9], check.Equals, netaddr.MustParseIP("10.27.0.10"))
c.Assert(ips[300], check.Equals, netaddr.MustParseIP("10.27.1.47"))
// Check that we can read back the IPs
m1, err := h.GetMachineByID(1)
c.Assert(err, check.IsNil)
c.Assert(m1.IPAddress, check.Equals, netaddr.MustParseIP("10.27.0.1").String())
m50, err := h.GetMachineByID(50)
c.Assert(err, check.IsNil)
c.Assert(m50.IPAddress, check.Equals, netaddr.MustParseIP("10.27.0.50").String())
expectedNextIP := netaddr.MustParseIP("10.27.1.97")
nextIP, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
c.Assert(nextIP.String(), check.Equals, expectedNextIP.String())
// If we call getAvailableIP again, we should receive
// the same IP, as it has not yet been reserved.
nextIP2, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
c.Assert(nextIP2.String(), check.Equals, expectedNextIP.String())
}
func (s *Suite) TestGetAvailableIpMachineWithoutIP(c *check.C) {
ip, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
expected := netaddr.MustParseIP("10.27.0.1")
c.Assert(ip.String(), check.Equals, expected.String())
n, err := h.CreateNamespace("test_ip")
c.Assert(err, check.IsNil)
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
c.Assert(err, check.IsNil)
_, err = h.GetMachine("test", "testmachine")
c.Assert(err, check.NotNil)
m := Machine{
ID: 0,
MachineKey: "foo",
NodeKey: "bar",
DiscoKey: "faa",
Name: "testmachine",
NamespaceID: n.ID,
Registered: true,
RegisterMethod: "authKey",
AuthKeyID: uint(pak.ID),
}
h.db.Save(&m)
ip2, err := h.getAvailableIP()
c.Assert(err, check.IsNil)
c.Assert(ip2.String(), check.Equals, expected.String())
}