mirror of
https://github.com/juanfont/headscale.git
synced 2025-08-16 01:18:28 +00:00
Compare commits
1074 Commits
v0.12.0-be
...
tmp-fix-in
Author | SHA1 | Date | |
---|---|---|---|
![]() |
5539ef1f8f | ||
![]() |
100f7190f3 | ||
![]() |
f9c4d577e2 | ||
![]() |
9826b518bd | ||
![]() |
32a8f06486 | ||
![]() |
2ab2b8656b | ||
![]() |
d9ab98e47f | ||
![]() |
9d584bb0d3 | ||
![]() |
4f725ba9e1 | ||
![]() |
b75a113c91 | ||
![]() |
75af83bb81 | ||
![]() |
0f6f0c3b6b | ||
![]() |
b344524a6d | ||
![]() |
6f4d5a532e | ||
![]() |
2d83c70173 | ||
![]() |
c90e862460 | ||
![]() |
c46a34e6b8 | ||
![]() |
693f59ba2f | ||
![]() |
abae078855 | ||
![]() |
0212db3fad | ||
![]() |
49354f678e | ||
![]() |
dc94570c4a | ||
![]() |
51b1027aec | ||
![]() |
936adb7d2c | ||
![]() |
581d1f3bfa | ||
![]() |
7c87ef6c86 | ||
![]() |
1a9a9b718d | ||
![]() |
6c9f3420e2 | ||
![]() |
a4d0efbe8d | ||
![]() |
56858a56db | ||
![]() |
395caaad42 | ||
![]() |
3f0639c87d | ||
![]() |
889eff265f | ||
![]() |
c6eb7be7fb | ||
![]() |
02c7a46b97 | ||
![]() |
ea7b3baa8b | ||
![]() |
5724f4607c | ||
![]() |
b755d47652 | ||
![]() |
96221cc4f7 | ||
![]() |
34d261179e | ||
![]() |
091b05f155 | ||
![]() |
aca5646032 | ||
![]() |
7e9abbeaec | ||
![]() |
c6aaa37f2d | ||
![]() |
b8c3387892 | ||
![]() |
c50d3aa9bd | ||
![]() |
4ccff8bf28 | ||
![]() |
5b5298b025 | ||
![]() |
8e0939f403 | ||
![]() |
cf3fc85196 | ||
![]() |
e0b15c18ce | ||
![]() |
566b8c3df3 | ||
![]() |
32a6151df9 | ||
![]() |
3777de7133 | ||
![]() |
8cae4f80d7 | ||
![]() |
911c5bddce | ||
![]() |
4a200c308b | ||
![]() |
625e45b1cb | ||
![]() |
8551b0dde0 | ||
![]() |
050782aff3 | ||
![]() |
00885dffe1 | ||
![]() |
ffcc72876c | ||
![]() |
fa91ece5b4 | ||
![]() |
c810b24eb9 | ||
![]() |
03ced0ecfe | ||
![]() |
c859bea0cf | ||
![]() |
a913d1b521 | ||
![]() |
2464c92572 | ||
![]() |
10cd87e5a2 | ||
![]() |
58c336e7f4 | ||
![]() |
bb4a9583a7 | ||
![]() |
7ae38346e5 | ||
![]() |
7604c0f691 | ||
![]() |
f2f4c3f684 | ||
![]() |
34f489b1f4 | ||
![]() |
72d1d2630e | ||
![]() |
d559e23bc6 | ||
![]() |
4637400d29 | ||
![]() |
0fa943e4b7 | ||
![]() |
9707b1f540 | ||
![]() |
657fb208d6 | ||
![]() |
647972c7cf | ||
![]() |
39b58f7d4c | ||
![]() |
c8378e8b7d | ||
![]() |
d404ba102d | ||
![]() |
5e9004c407 | ||
![]() |
8e63b53b0c | ||
![]() |
116bef25a7 | ||
![]() |
294975ba87 | ||
![]() |
51b8c659f1 | ||
![]() |
082fbead66 | ||
![]() |
73c16ffc65 | ||
![]() |
dec51348e6 | ||
![]() |
b0b919efb0 | ||
![]() |
396c3ecdf7 | ||
![]() |
53e5c05b0a | ||
![]() |
dedeb4c181 | ||
![]() |
e611063669 | ||
![]() |
6c9c9a401f | ||
![]() |
6da4396faa | ||
![]() |
d89fb68a7a | ||
![]() |
8d9462147c | ||
![]() |
89b7fa6b06 | ||
![]() |
d4a550bb4c | ||
![]() |
d5e331a2fb | ||
![]() |
367da0fcc2 | ||
![]() |
8111b0aa83 | ||
![]() |
735440d1a3 | ||
![]() |
3ae340527f | ||
![]() |
bfa9ed814d | ||
![]() |
1e4678c02f | ||
![]() |
66fffd69ce | ||
![]() |
e3f99d670e | ||
![]() |
360488abb4 | ||
![]() |
8dda44105e | ||
![]() |
2215e17223 | ||
![]() |
157db307f9 | ||
![]() |
0bd39b2c5e | ||
![]() |
8f31ed51e1 | ||
![]() |
d2d1f92836 | ||
![]() |
c02819ab9f | ||
![]() |
28a3a5bd61 | ||
![]() |
891815634b | ||
![]() |
8650328922 | ||
![]() |
7bd07e3b9b | ||
![]() |
76195bb3ac | ||
![]() |
6afd492095 | ||
![]() |
c95bce4aea | ||
![]() |
fd3a1c13e3 | ||
![]() |
95824ac2ec | ||
![]() |
a050158d11 | ||
![]() |
e0ef601123 | ||
![]() |
9c5d485fdd | ||
![]() |
cb88b16207 | ||
![]() |
257c025975 | ||
![]() |
50bdf9d3b9 | ||
![]() |
8d58894daa | ||
![]() |
43fa7f9fd5 | ||
![]() |
f2a8bfeb9f | ||
![]() |
06bbeea37f | ||
![]() |
e5f26f819a | ||
![]() |
a058f17946 | ||
![]() |
a4b4fc8b6c | ||
![]() |
ab35baaa29 | ||
![]() |
883bb92991 | ||
![]() |
bfb58de7b8 | ||
![]() |
6faf2d63d0 | ||
![]() |
569f3caab9 | ||
![]() |
7cd0f5e8a4 | ||
![]() |
02cc6bcc05 | ||
![]() |
9ff09b73ad | ||
![]() |
f93cf4b980 | ||
![]() |
3d7be5b287 | ||
![]() |
cdf41bd500 | ||
![]() |
735a6aaa39 | ||
![]() |
0c2648c188 | ||
![]() |
7e6291c21c | ||
![]() |
3f7749c6d4 | ||
![]() |
586c5411f1 | ||
![]() |
2be16b581c | ||
![]() |
06e22bf878 | ||
![]() |
0b4b530809 | ||
![]() |
efca3daa5c | ||
![]() |
fdefe46c40 | ||
![]() |
34be10840c | ||
![]() |
80ad1db228 | ||
![]() |
e918ea89a3 | ||
![]() |
19b968849f | ||
![]() |
5bc11891f5 | ||
![]() |
818d26b5f9 | ||
![]() |
c47354bdc3 | ||
![]() |
86ce0e0c66 | ||
![]() |
39f03b86c8 | ||
![]() |
8287ba24b9 | ||
![]() |
ab1aac9f3e | ||
![]() |
3e353004b8 | ||
![]() |
bcb04d38a5 | ||
![]() |
de0e2bf828 | ||
![]() |
8fed47a2be | ||
![]() |
17d4968425 | ||
![]() |
54acee6880 | ||
![]() |
a4e05d4db3 | ||
![]() |
b0acbed329 | ||
![]() |
1b2967320b | ||
![]() |
90f6be0c98 | ||
![]() |
78ed610b50 | ||
![]() |
af891808f6 | ||
![]() |
0c5a402206 | ||
![]() |
8744eeeb19 | ||
![]() |
ce13596077 | ||
![]() |
402a29e50c | ||
![]() |
0363e58467 | ||
![]() |
c8a14ccabb | ||
![]() |
1de29fd4e6 | ||
![]() |
75a0155f73 | ||
![]() |
adb55bcfe9 | ||
![]() |
2201ec8905 | ||
![]() |
39f6fdef1a | ||
![]() |
699aa5cf38 | ||
![]() |
1486adb25a | ||
![]() |
2653c2f5e8 | ||
![]() |
7b7244dac2 | ||
![]() |
571ce2b0b9 | ||
![]() |
c3db5ed749 | ||
![]() |
0797148076 | ||
![]() |
24c9530eee | ||
![]() |
679cf7c0d7 | ||
![]() |
19b6405332 | ||
![]() |
aee8aa1c61 | ||
![]() |
5514a862dc | ||
![]() |
1ea8bb782c | ||
![]() |
35722cd5aa | ||
![]() |
533ecee252 | ||
![]() |
f1db2d0c8e | ||
![]() |
6f6fb4dcd6 | ||
![]() |
b1ba7ba685 | ||
![]() |
6dccfee862 | ||
![]() |
6f32b80b2b | ||
![]() |
2feed18b28 | ||
![]() |
36dca3516a | ||
![]() |
06129277ed | ||
![]() |
6b1482daee | ||
![]() |
24e4787a64 | ||
![]() |
5bfae22c8f | ||
![]() |
3e078f0494 | ||
![]() |
0b4f59b82b | ||
![]() |
a19af04582 | ||
![]() |
0676aa11a9 | ||
![]() |
be25bbce92 | ||
![]() |
5ecfbbaf5d | ||
![]() |
7f7cd737dc | ||
![]() |
b472e5a689 | ||
![]() |
25c674ed32 | ||
![]() |
3d93cf9e2d | ||
![]() |
f7edea5f40 | ||
![]() |
d26e220fb9 | ||
![]() |
d860270733 | ||
![]() |
a09633e859 | ||
![]() |
a1837a4d69 | ||
![]() |
52cc3bc8eb | ||
![]() |
9175aca094 | ||
![]() |
848727a21d | ||
![]() |
df7d5fa2b9 | ||
![]() |
86dfc91dd5 | ||
![]() |
7f66d9184b | ||
![]() |
ff5f31b87e | ||
![]() |
a0c465c2eb | ||
![]() |
d11279e615 | ||
![]() |
266aac9e61 | ||
![]() |
4ffd3eacb0 | ||
![]() |
a443255b3e | ||
![]() |
a992840c9b | ||
![]() |
dbc1d981c9 | ||
![]() |
9993f51b5e | ||
![]() |
3a3fc0a4be | ||
![]() |
5316dd9c27 | ||
![]() |
59a1a85a2b | ||
![]() |
fc502e1e79 | ||
![]() |
405de9e0f8 | ||
![]() |
6eac5046c6 | ||
![]() |
f7f722af52 | ||
![]() |
583f6eeedd | ||
![]() |
bec35b4965 | ||
![]() |
e596d8287c | ||
![]() |
6c903d2d93 | ||
![]() |
914431b94a | ||
![]() |
11da7436c7 | ||
![]() |
0f532aa5c1 | ||
![]() |
835828fe92 | ||
![]() |
fff1011ed8 | ||
![]() |
ef497caa1b | ||
![]() |
4f3f0542d4 | ||
![]() |
5fa987519d | ||
![]() |
77ceeaf5fd | ||
![]() |
4a9d3bedf9 | ||
![]() |
802eb931d1 | ||
![]() |
9ebeb3d7e4 | ||
![]() |
e631c6f7e0 | ||
![]() |
163e5c29e4 | ||
![]() |
4aae917f74 | ||
![]() |
9b393eb861 | ||
![]() |
5fa3016703 | ||
![]() |
03cccd60a6 | ||
![]() |
177c21b294 | ||
![]() |
f4873d9387 | ||
![]() |
747d64cdae | ||
![]() |
c9efd5c132 | ||
![]() |
546ddd2a84 | ||
![]() |
2edb5428f9 | ||
![]() |
9f082125fa | ||
![]() |
11582105ab | ||
![]() |
c4e69fe2c3 | ||
![]() |
4435a4f19d | ||
![]() |
02ae7a0563 | ||
![]() |
852dc0f4de | ||
![]() |
844ad15109 | ||
![]() |
522e892099 | ||
![]() |
0445f404ec | ||
![]() |
bc1909fa22 | ||
![]() |
ca71830963 | ||
![]() |
a28eebfca3 | ||
![]() |
0d31ea08c3 | ||
![]() |
614c003704 | ||
![]() |
b511295349 | ||
![]() |
fcdc292647 | ||
![]() |
09836cd150 | ||
![]() |
49ec9943b9 | ||
![]() |
72c1edaaa4 | ||
![]() |
294ed7a751 | ||
![]() |
31c0062d5e | ||
![]() |
63d920510d | ||
![]() |
16f9691e80 | ||
![]() |
209d003832 | ||
![]() |
62cfd60e38 | ||
![]() |
fdbc9657bc | ||
![]() |
ad4401aa40 | ||
![]() |
c26280c331 | ||
![]() |
b028a7dfc9 | ||
![]() |
41cd0d30eb | ||
![]() |
8be9e9655c | ||
![]() |
31bdba7456 | ||
![]() |
d6e1d10b12 | ||
![]() |
21268f7abe | ||
![]() |
91b95ff707 | ||
![]() |
6ed79b7bb8 | ||
![]() |
b4f5ed6618 | ||
![]() |
ed46491a3d | ||
![]() |
dc8c20e002 | ||
![]() |
68417cc888 | ||
![]() |
a2fb5b2b9d | ||
![]() |
3fbfc5a649 | ||
![]() |
00535a2016 | ||
![]() |
fd452d52ca | ||
![]() |
7cc58af932 | ||
![]() |
ddb87af5ce | ||
![]() |
b9ea83fed8 | ||
![]() |
e279224484 | ||
![]() |
12d8f0f4b0 | ||
![]() |
6ba68d150c | ||
![]() |
1b3a7bbf03 | ||
![]() |
4e686f8b77 | ||
![]() |
62c780a448 | ||
![]() |
bc055edf12 | ||
![]() |
47c72a4e2e | ||
![]() |
02a78e5a45 | ||
![]() |
01d9a2f589 | ||
![]() |
5403f215bc | ||
![]() |
96e2955ba7 | ||
![]() |
03659c4175 | ||
![]() |
843e2bd9b6 | ||
![]() |
28efd92fca | ||
![]() |
7bb87a7300 | ||
![]() |
fec8cda16a | ||
![]() |
2c448d4a5c | ||
![]() |
3d302441b6 | ||
![]() |
8061abe279 | ||
![]() |
ea9aaa6022 | ||
![]() |
cc9eeda889 | ||
![]() |
25f1dcf724 | ||
![]() |
31debf7055 | ||
![]() |
db8db0299e | ||
![]() |
e80954b6c8 | ||
![]() |
8504d0d8ba | ||
![]() |
7ef8cd881c | ||
![]() |
79704dc9b0 | ||
![]() |
06c928bc52 | ||
![]() |
62808cbd86 | ||
![]() |
14994cb6cc | ||
![]() |
6b79679cb4 | ||
![]() |
caf79f6910 | ||
![]() |
6e2768097a | ||
![]() |
8845938881 | ||
![]() |
a23035aee7 | ||
![]() |
e51e6f487f | ||
![]() |
f78deaebb6 | ||
![]() |
4d2949bda9 | ||
![]() |
cb0899b534 | ||
![]() |
ecf5259693 | ||
![]() |
3a90079ab8 | ||
![]() |
970dea5d68 | ||
![]() |
cd9807a1d3 | ||
![]() |
613dc61339 | ||
![]() |
b9fee36f6e | ||
![]() |
17d6624bb9 | ||
![]() |
f53bb63b2d | ||
![]() |
ea7bcfffbb | ||
![]() |
3023323528 | ||
![]() |
2dfd8a9098 | ||
![]() |
c8ed1f0f43 | ||
![]() |
f9e2ce2c8c | ||
![]() |
886e95c00d | ||
![]() |
6dd9e93346 | ||
![]() |
2dacf839dc | ||
![]() |
8f6952acee | ||
![]() |
235a90276f | ||
![]() |
5c285afda5 | ||
![]() |
db930af50e | ||
![]() |
ffa570e877 | ||
![]() |
96ae78f422 | ||
![]() |
580c72bf16 | ||
![]() |
9254afff2d | ||
![]() |
7ce0bd053c | ||
![]() |
41a8c14acb | ||
![]() |
be2487f4c0 | ||
![]() |
4651c44dde | ||
![]() |
4fcc5e253c | ||
![]() |
89a1a56328 | ||
![]() |
db1528bc73 | ||
![]() |
587bdc75de | ||
![]() |
98f54c9f7f | ||
![]() |
cd1d10761f | ||
![]() |
9de9bc23f8 | ||
![]() |
02f68ebac8 | ||
![]() |
dd3f24b83f | ||
![]() |
bc63c577a9 | ||
![]() |
57c81e4153 | ||
![]() |
556ca5fec7 | ||
![]() |
93682ab708 | ||
![]() |
6eeee8e5c7 | ||
![]() |
d195847d8f | ||
![]() |
3d8dc9d2bf | ||
![]() |
8601dd1f42 | ||
![]() |
3abdc870d8 | ||
![]() |
367f8489db | ||
![]() |
c312f8bf4a | ||
![]() |
1f43c39f93 | ||
![]() |
9f03a012fb | ||
![]() |
22dd61d849 | ||
![]() |
a92f6abc6e | ||
![]() |
9cdaa9730b | ||
![]() |
5d67ed0ce1 | ||
![]() |
62d774b6ee | ||
![]() |
a14f50eeca | ||
![]() |
98e98a8adb | ||
![]() |
fa7ef3df2f | ||
![]() |
c3324371d6 | ||
![]() |
6e08241712 | ||
![]() |
c07dd3f14f | ||
![]() |
b2ae9b6cac | ||
![]() |
57536b020e | ||
![]() |
0003e30084 | ||
![]() |
23be13b113 | ||
![]() |
5e44266292 | ||
![]() |
32522cb482 | ||
![]() |
6d296a195d | ||
![]() |
3272febfb3 | ||
![]() |
7dae780be1 | ||
![]() |
73f1c06f65 | ||
![]() |
b60727b205 | ||
![]() |
8cee31d8d7 | ||
![]() |
b5aace6d3a | ||
![]() |
7e286c570e | ||
![]() |
52fd13bfc4 | ||
![]() |
b8e4aeede8 | ||
![]() |
9a632c17d1 | ||
![]() |
8758ee1c4d | ||
![]() |
150ae1846a | ||
![]() |
452286552c | ||
![]() |
631cf58ff0 | ||
![]() |
8a2c0e88f4 | ||
![]() |
af6a47fdd3 | ||
![]() |
94d910557f | ||
![]() |
a8a683d3cc | ||
![]() |
a1caa5b45c | ||
![]() |
f42868f67f | ||
![]() |
a6455653c0 | ||
![]() |
c8aa653275 | ||
![]() |
91e5cbd793 | ||
![]() |
79fc74c7a4 | ||
![]() |
c8503075e0 | ||
![]() |
4068a7b00b | ||
![]() |
daae2fe549 | ||
![]() |
47bbb85a20 | ||
![]() |
739653fa71 | ||
![]() |
304109a6c5 | ||
![]() |
c29af96a19 | ||
![]() |
d21e9d29d1 | ||
![]() |
b65bd5baa8 | ||
![]() |
0165b89941 | ||
![]() |
53b62f3f39 | ||
![]() |
cd2914ab3b | ||
![]() |
e85b97143c | ||
![]() |
1eafe960b8 | ||
![]() |
749c92954c | ||
![]() |
db9ba17920 | ||
![]() |
d5ce7d7523 | ||
![]() |
2e6687209b | ||
![]() |
2e04abf4bb | ||
![]() |
882c0c34c1 | ||
![]() |
61ebb713f2 | ||
![]() |
ac5ad42474 | ||
![]() |
d68d7d5a6f | ||
![]() |
bff9036f14 | ||
![]() |
8b08c2a918 | ||
![]() |
b9f0fabb5c | ||
![]() |
9d4822b8c7 | ||
![]() |
466d03d574 | ||
![]() |
d43fec7f96 | ||
![]() |
62f4c205f5 | ||
![]() |
003c19004d | ||
![]() |
70274d528c | ||
![]() |
6d41279781 | ||
![]() |
b781446e86 | ||
![]() |
1c9b1c0579 | ||
![]() |
ade9552736 | ||
![]() |
68403cb76e | ||
![]() |
537ecb8db0 | ||
![]() |
8f5875efe4 | ||
![]() |
98ac88d5ef | ||
![]() |
d13338a9fb | ||
![]() |
1579ffb66a | ||
![]() |
0bfa5302a7 | ||
![]() |
b8aad5451d | ||
![]() |
60ee04674d | ||
![]() |
9901d6b2e7 | ||
![]() |
663e8384a3 | ||
![]() |
61440c42d3 | ||
![]() |
18ee6274e1 | ||
![]() |
0abfbdc18a | ||
![]() |
082a852c5e | ||
![]() |
af081e9fd3 | ||
![]() |
8b5e8b7dfc | ||
![]() |
1e7d7e510e | ||
![]() |
a806694d23 | ||
![]() |
62d7fae056 | ||
![]() |
06d85688fd | ||
![]() |
dd219d0ff6 | ||
![]() |
6087e1cf6f | ||
![]() |
c47fb1ae54 | ||
![]() |
48cec3cd90 | ||
![]() |
e54c508c10 | ||
![]() |
941e9d9b0f | ||
![]() |
11ccae8e52 | ||
![]() |
b803240dc1 | ||
![]() |
bdbf620ece | ||
![]() |
e5d22b8a70 | ||
![]() |
05c5e2280b | ||
![]() |
b41d89946a | ||
![]() |
cc0c88a63a | ||
![]() |
c06689dec1 | ||
![]() |
b85dd7abbd | ||
![]() |
6aeaff43aa | ||
![]() |
dd26cbd193 | ||
![]() |
9a60eeaf86 | ||
![]() |
b0ae3240fd | ||
![]() |
41efe98953 | ||
![]() |
2b68c90778 | ||
![]() |
f19c048569 | ||
![]() |
6cc8bbc24f | ||
![]() |
c24de595f6 | ||
![]() |
63641a7b17 | ||
![]() |
a6570d33a6 | ||
![]() |
124d8a3424 | ||
![]() |
5de9de14a9 | ||
![]() |
15f8cb5034 | ||
![]() |
03452a8dca | ||
![]() |
15ed71315c | ||
![]() |
05df8e947a | ||
![]() |
b3fa66dbd2 | ||
![]() |
a27b386123 | ||
![]() |
580db9b58f | ||
![]() |
1114449601 | ||
![]() |
b47de07eea | ||
![]() |
e1fcf0da26 | ||
![]() |
dcf3ea567c | ||
![]() |
de2ea83b3b | ||
![]() |
eb06054a7b | ||
![]() |
eb500155e8 | ||
![]() |
dc909ba6d7 | ||
![]() |
70910c4595 | ||
![]() |
54c3e00a1f | ||
![]() |
e78c002f5a | ||
![]() |
237f7f1027 | ||
![]() |
992efbd84a | ||
![]() |
e9eb90fa76 | ||
![]() |
88378c22fb | ||
![]() |
b742379627 | ||
![]() |
df37d1a639 | ||
![]() |
758b1ba1cb | ||
![]() |
435ee36d78 | ||
![]() |
35efd8f95a | ||
![]() |
09d78c7a05 | ||
![]() |
60655c5242 | ||
![]() |
22d2443281 | ||
![]() |
a70669fca7 | ||
![]() |
0720473033 | ||
![]() |
e799307e74 | ||
![]() |
575f33d183 | ||
![]() |
607c1eb316 | ||
![]() |
d69dada8ff | ||
![]() |
f9e0c13890 | ||
![]() |
12a50ac8ac | ||
![]() |
b342cf0240 | ||
![]() |
e3ff87b7ef | ||
![]() |
745696b310 | ||
![]() |
23cde8445f | ||
![]() |
9d43f589ae | ||
![]() |
897d480f4d | ||
![]() |
6f172a6e4c | ||
![]() |
44a5372c53 | ||
![]() |
f2ea6fb30f | ||
![]() |
4a4952899b | ||
![]() |
b72a8aa7d1 | ||
![]() |
e301d0d1df | ||
![]() |
75ca91b0f7 | ||
![]() |
e208ccc982 | ||
![]() |
71a62697aa | ||
![]() |
f9c0597875 | ||
![]() |
aa3eb5171a | ||
![]() |
dcc46af8de | ||
![]() |
b61500670c | ||
![]() |
ccec534e19 | ||
![]() |
9b10457209 | ||
![]() |
9a8f605cba | ||
![]() |
1246267ead | ||
![]() |
a0a56d43f8 | ||
![]() |
63d87110f6 | ||
![]() |
7c99d963e2 | ||
![]() |
a614f158be | ||
![]() |
2b6a5173da | ||
![]() |
32ac690494 | ||
![]() |
0835bffc3c | ||
![]() |
c80e364f02 | ||
![]() |
5b169010be | ||
![]() |
eeded85d9c | ||
![]() |
e4d81bbb16 | ||
![]() |
1f8c7f427b | ||
![]() |
ef422e6988 | ||
![]() |
ec4dc68524 | ||
![]() |
86ade72c19 | ||
![]() |
0c0653df8b | ||
![]() |
12b3b5f8f1 | ||
![]() |
052dbfe440 | ||
![]() |
5310f8692b | ||
![]() |
aff6b84250 | ||
![]() |
21eee912a3 | ||
![]() |
dbb2af0238 | ||
![]() |
77fe0b01f7 | ||
![]() |
361b4f7f4f | ||
![]() |
dec4ee5f73 | ||
![]() |
b2dca80e7a | ||
![]() |
a455a874ad | ||
![]() |
49cd761bf6 | ||
![]() |
6477e6a583 | ||
![]() |
8a95fe517a | ||
![]() |
a9d4fa89dc | ||
![]() |
94c5474212 | ||
![]() |
d34d617935 | ||
![]() |
573008757d | ||
![]() |
4c74043f72 | ||
![]() |
0551b34de5 | ||
![]() |
105812421e | ||
![]() |
4a9fd3a680 | ||
![]() |
1cb39d914c | ||
![]() |
5157f356cb | ||
![]() |
7c63412df5 | ||
![]() |
82cb6b9ddc | ||
![]() |
379017602c | ||
![]() |
8bef04d8df | ||
![]() |
5e92ddad43 | ||
![]() |
e64bee778f | ||
![]() |
5e1b12948e | ||
![]() |
eea8e7ba6f | ||
![]() |
78251ce8ec | ||
![]() |
a8649d83c4 | ||
![]() |
16b21e8158 | ||
![]() |
35616eb861 | ||
![]() |
e7bef56718 | ||
![]() |
c6b87de959 | ||
![]() |
50053e616a | ||
![]() |
54cc3c067f | ||
![]() |
402a76070f | ||
![]() |
9a61725e9f | ||
![]() |
6126d6d9b5 | ||
![]() |
469551bc5d | ||
![]() |
1caa6f5d69 | ||
![]() |
ecc26432fd | ||
![]() |
caffbd8956 | ||
![]() |
fd1e4a1dcd | ||
![]() |
acb945841c | ||
![]() |
c58ce6f60c | ||
![]() |
d6f6939c54 | ||
![]() |
e0b9a317f4 | ||
![]() |
c159eb7541 | ||
![]() |
8a3a0b6403 | ||
![]() |
67d6c8f946 | ||
![]() |
06e6c29a5b | ||
![]() |
a9122c3de3 | ||
![]() |
b1bd17f316 | ||
![]() |
b39faa124a | ||
![]() |
8689a39c96 | ||
![]() |
bae8ed3e70 | ||
![]() |
08c7076667 | ||
![]() |
91b50550ee | ||
![]() |
2c7064462a | ||
![]() |
d9e7f37280 | ||
![]() |
e03b3d558f | ||
![]() |
2fd36dd254 | ||
![]() |
381598663d | ||
![]() |
6d699d3c29 | ||
![]() |
ebe59a5a27 | ||
![]() |
d55c79e75b | ||
![]() |
eda0a9f88a | ||
![]() |
47e8442d91 | ||
![]() |
f9ce32fe1a | ||
![]() |
189e883f91 | ||
![]() |
aa506503e2 | ||
![]() |
9c2c09fce7 | ||
![]() |
5596a0acef | ||
![]() |
9687e6768d | ||
![]() |
fb85c78e8a | ||
![]() |
d27f2bc538 | ||
![]() |
8c33907655 | ||
![]() |
afb67b6e75 | ||
![]() |
69f220fe5c | ||
![]() |
c46dfd761c | ||
![]() |
95453cba75 | ||
![]() |
ed2175706c | ||
![]() |
686e45cf27 | ||
![]() |
ae6a20e4d9 | ||
![]() |
046116656b | ||
![]() |
972bef1194 | ||
![]() |
4f1f235a2e | ||
![]() |
7e4709c13f | ||
![]() |
cef0a2b0b3 | ||
![]() |
fcdbe7c510 | ||
![]() |
995731a29c | ||
![]() |
45727dbb21 | ||
![]() |
f0a73632e0 | ||
![]() |
823cc493f0 | ||
![]() |
a86b33f1ff | ||
![]() |
28c2bbeb27 | ||
![]() |
d4761da27c | ||
![]() |
b0c7ebeb7d | ||
![]() |
5f375d69b5 | ||
![]() |
9eb705a4fb | ||
![]() |
1b87396a8c | ||
![]() |
bb14bcd4d2 | ||
![]() |
48c866b058 | ||
![]() |
fe0b43eaaf | ||
![]() |
afd4a3706e | ||
![]() |
717250adb3 | ||
![]() |
67f5c32b49 | ||
![]() |
0191ea93ff | ||
![]() |
92ffac625e | ||
![]() |
bfbcea35a0 | ||
![]() |
638a84adb9 | ||
![]() |
ec58979ce0 | ||
![]() |
7e6e093f17 | ||
![]() |
4962335860 | ||
![]() |
a37339fa54 | ||
![]() |
f7eeb979fb | ||
![]() |
f2f8d834e8 | ||
![]() |
fe2f75d13d | ||
![]() |
52db6188df | ||
![]() |
8dca40535f | ||
![]() |
f4c302f1fb | ||
![]() |
4ca8181dcb | ||
![]() |
24a8e198a1 | ||
![]() |
9411ec47c3 | ||
![]() |
1e8f4dbdff | ||
![]() |
9399754489 | ||
![]() |
9d1752acbc | ||
![]() |
6da2a19d10 | ||
![]() |
9ceac5c0fc | ||
![]() |
f562ad579a | ||
![]() |
bbadeb567a | ||
![]() |
69cdfbb56f | ||
![]() |
d971f0f0e6 | ||
![]() |
650108c7c7 | ||
![]() |
baae266db0 | ||
![]() |
50af44bc2f | ||
![]() |
e3bcc88880 | ||
![]() |
14e49885fb | ||
![]() |
fbc1843889 | ||
![]() |
45d5ab30ff | ||
![]() |
d5fd7a5c00 | ||
![]() |
b5a59d4e7a | ||
![]() |
211fe4034a | ||
![]() |
daa75da277 | ||
![]() |
25550f8866 | ||
![]() |
4bbe0051f6 | ||
![]() |
5ab62378ae | ||
![]() |
f006860136 | ||
![]() |
9c6ce02554 | ||
![]() |
960412a335 | ||
![]() |
ecb3ee6bfa | ||
![]() |
5242025ab3 | ||
![]() |
b3d0fb7a93 | ||
![]() |
5e167cc00a | ||
![]() |
d00251c63e | ||
![]() |
4f9ece14c5 | ||
![]() |
7bf2a91dd0 | ||
![]() |
385dd9cc34 | ||
![]() |
602291df61 | ||
![]() |
5245f1accc | ||
![]() |
91babb5130 | ||
![]() |
8798efd353 | ||
![]() |
66a12004e7 | ||
![]() |
74621e2750 | ||
![]() |
74c3c6bb60 | ||
![]() |
84b98e716a | ||
![]() |
e9f13b6031 | ||
![]() |
fe6d47030f | ||
![]() |
a19550adbf | ||
![]() |
3db88d27de | ||
![]() |
a6b7bc5939 | ||
![]() |
397b6fc4bf | ||
![]() |
7d5e6d3f0f | ||
![]() |
7a90c2fba1 | ||
![]() |
5cf215a44b | ||
![]() |
7916fa8b45 | ||
![]() |
5fbef07627 | ||
![]() |
21df798f07 | ||
![]() |
67bb1fc9dd | ||
![]() |
61bfa79be2 | ||
![]() |
f073d8f43c | ||
![]() |
5f642eef76 | ||
![]() |
d8c4c3163b | ||
![]() |
9cedbbafd4 | ||
![]() |
aceaba60f1 | ||
![]() |
7b5ba9f781 | ||
![]() |
de59946447 | ||
![]() |
97eac3b938 | ||
![]() |
fb45138fc1 | ||
![]() |
e9949b4c70 | ||
![]() |
e482dfeed4 | ||
![]() |
9b7d657cbe | ||
![]() |
55d746d3f5 | ||
![]() |
73497382b7 | ||
![]() |
c364c2a382 | ||
![]() |
e540679dbd | ||
![]() |
86b329d8bf | ||
![]() |
b2b2954545 | ||
![]() |
a3360b082f | ||
![]() |
b721502147 | ||
![]() |
1869bff4ba | ||
![]() |
0b9dd19ec7 | ||
![]() |
b2889bc355 | ||
![]() |
28c824acaf | ||
![]() |
57f1da6dca | ||
![]() |
c9640b2f3e | ||
![]() |
546b1e8a05 | ||
![]() |
3b54a68f5c | ||
![]() |
1b1aac18d2 | ||
![]() |
f30ee3d2df | ||
![]() |
9f80349471 | ||
![]() |
14b23544e4 | ||
![]() |
4e54796384 | ||
![]() |
c3b68adfed | ||
![]() |
0018a78d5a | ||
![]() |
50f0270543 | ||
![]() |
bb80b679bc | ||
![]() |
6fa0903a8e | ||
![]() |
2bc8051ae5 | ||
![]() |
4841e16386 | ||
![]() |
d79ccfc05a | ||
![]() |
ead8b68a03 | ||
![]() |
3bb4c28c9a | ||
![]() |
2fbcc38f8f | ||
![]() |
315ff9daf0 | ||
![]() |
4078e75b50 | ||
![]() |
58bfea4e64 | ||
![]() |
e18078d7f8 | ||
![]() |
c73b57e7dc | ||
![]() |
531298fa59 | ||
![]() |
30a2ccd975 | ||
![]() |
59e48993f2 | ||
![]() |
bfc6f6e0eb | ||
![]() |
811d3d510c | ||
![]() |
2aba37d2ef | ||
![]() |
8853ccd5b4 | ||
![]() |
c794f32f58 | ||
![]() |
dd8bae8c61 | ||
![]() |
1b47ddd583 | ||
![]() |
20991d6883 | ||
![]() |
96f09e3f30 | ||
![]() |
8f40696f35 | ||
![]() |
c1845477ef | ||
![]() |
1d40de3095 | ||
![]() |
2357fb6f80 | ||
![]() |
ba8afdb7be | ||
![]() |
d9aaa0bdfc | ||
![]() |
66ff34c2dd | ||
![]() |
150652e939 | ||
![]() |
7bdd7748e4 | ||
![]() |
0426212348 | ||
![]() |
85cf443ac6 | ||
![]() |
1b2fff4337 | ||
![]() |
8c79165b0d | ||
![]() |
7b607b3fe8 | ||
![]() |
41fbe47cdf | ||
![]() |
af25aa75d9 | ||
![]() |
da5250ea32 | ||
![]() |
168b1bd579 | ||
![]() |
9de5c7f8b8 | ||
![]() |
52db80ab0d | ||
![]() |
0c3fd16113 | ||
![]() |
e05c5e0b93 | ||
![]() |
310e7b15c7 | ||
![]() |
9e3318ca27 | ||
![]() |
e9adfcd678 | ||
![]() |
d44b2a7c01 | ||
![]() |
5b5ecd52e1 | ||
![]() |
eddd62eee0 | ||
![]() |
38c27f6bf8 | ||
![]() |
90fb9aa4ed | ||
![]() |
3af1253a65 | ||
![]() |
eb1ce64b7c | ||
![]() |
2c9ed63021 | ||
![]() |
4c779d306b | ||
![]() |
0862f60ff0 | ||
![]() |
991175f2aa | ||
![]() |
1815040d98 | ||
![]() |
71ab4c9b2c | ||
![]() |
e0c22a414b | ||
![]() |
4e63bba4fe | ||
![]() |
445c04baf7 | ||
![]() |
ad4e3a89e0 | ||
![]() |
6f6018bad5 | ||
![]() |
ccd41b9a13 | ||
![]() |
d8ce440309 | ||
![]() |
0609c97459 | ||
![]() |
2f576b2fb1 | ||
![]() |
853a5288f1 | ||
![]() |
cd0df1e46f | ||
![]() |
b195c87418 | ||
![]() |
c98a559b4d | ||
![]() |
5935b13b67 | ||
![]() |
9e619fc020 | ||
![]() |
45bcf39894 | ||
![]() |
0a1db89d33 | ||
![]() |
dbfb9e16e0 | ||
![]() |
8aa2606853 | ||
![]() |
a238a8b33a | ||
![]() |
74f26d3685 | ||
![]() |
e66f8b0eeb | ||
![]() |
e7b69dbf91 | ||
![]() |
13f23d2e7e | ||
![]() |
7a86321252 | ||
![]() |
7aace7eb6b | ||
![]() |
7a6be36f46 | ||
![]() |
bb27c80bad | ||
![]() |
c0c3b7d511 | ||
![]() |
6220836050 | ||
![]() |
b122d06f12 | ||
![]() |
6f9ed958ca | ||
![]() |
39ce59fcb1 | ||
![]() |
052fccdc98 | ||
![]() |
17411b65f3 | ||
![]() |
bf7ee78324 | ||
![]() |
fbe5054a67 | ||
![]() |
761147ea3b | ||
![]() |
25ccf5ef18 | ||
![]() |
b4f8961e44 | ||
![]() |
726ccc8c1f | ||
![]() |
126e694f26 | ||
![]() |
ab45cd37f8 | ||
![]() |
f59071ff1c | ||
![]() |
537cd35cb2 | ||
![]() |
56b6528e3b | ||
![]() |
bae7ba46de | ||
![]() |
fa197cc183 | ||
![]() |
00c69ce50c | ||
![]() |
a6e22387fd | ||
![]() |
a730f007d8 | ||
![]() |
3393363a67 | ||
![]() |
8218ef96ef | ||
![]() |
e8e573de62 | ||
![]() |
05db1b7109 | ||
![]() |
6e14fdf0d3 | ||
![]() |
1fd57a3375 | ||
![]() |
b4259fcd79 | ||
![]() |
f9137f3bb0 | ||
![]() |
b1a9b1ada1 | ||
![]() |
b8e9024845 | ||
![]() |
70d82ea184 | ||
![]() |
9dc20580c7 | ||
![]() |
4d60aeae18 | ||
![]() |
67d1dd984f | ||
![]() |
b02f8dd45d | ||
![]() |
3837f1714a | ||
![]() |
ed5498ef86 | ||
![]() |
e2f8c69e2e | ||
![]() |
beb3e9abc2 | ||
![]() |
78039f4cea | ||
![]() |
ed39b91f71 | ||
![]() |
8f632e9062 | ||
![]() |
a32175f791 | ||
![]() |
d35fb8bba0 | ||
![]() |
115d0cbe85 | ||
![]() |
1a6e5d8770 | ||
![]() |
3a3aecb774 | ||
![]() |
8b40343277 | ||
![]() |
7ec8346179 | ||
![]() |
46cdce00af | ||
![]() |
86f3f26a18 | ||
![]() |
febbb6006f | ||
![]() |
1d68509463 | ||
![]() |
b6d0c4f2aa | ||
![]() |
2c057c2d89 | ||
![]() |
cf7effda1b | ||
![]() |
19effe7034 | ||
![]() |
41053482b3 | ||
![]() |
e463283a58 | ||
![]() |
99814b468b | ||
![]() |
163ecb2a6b | ||
![]() |
4660b265d9 | ||
![]() |
1b6bad0b63 | ||
![]() |
26623d794b | ||
![]() |
e9cc60e49c | ||
![]() |
be2a28dd61 | ||
![]() |
cec236ce24 | ||
![]() |
45d331da99 | ||
![]() |
897fa558b0 | ||
![]() |
d971cf1295 | ||
![]() |
42bed58329 | ||
![]() |
d9f52efe70 | ||
![]() |
25b5eb8d7f | ||
![]() |
81c60939c9 | ||
![]() |
4edc96d14d | ||
![]() |
6b7c74133d | ||
![]() |
8da029bd14 | ||
![]() |
1d01103b67 | ||
![]() |
5df100539c | ||
![]() |
11c86acbe3 | ||
![]() |
86f36f9a43 | ||
![]() |
271cb71754 | ||
![]() |
80d196cbfd | ||
![]() |
3ce3ccb559 | ||
![]() |
a11c6fd8b9 | ||
![]() |
a75c5a4cff | ||
![]() |
8d504c35bf | ||
![]() |
8a07a63b1c | ||
![]() |
74fd5de43d | ||
![]() |
f9e6722635 | ||
![]() |
0bd4250a53 | ||
![]() |
4b44aa2180 | ||
![]() |
f78984f2ef | ||
![]() |
3de311b7f4 | ||
![]() |
5192841016 | ||
![]() |
07384fd2bb | ||
![]() |
a795e7c0c9 | ||
![]() |
ebfbd4a37d | ||
![]() |
fb933b7d41 | ||
![]() |
1c7cb98042 | ||
![]() |
fb634cdfc2 | ||
![]() |
f60f62792a | ||
![]() |
418fde2731 | ||
![]() |
53108207be | ||
![]() |
3fb3db6f20 | ||
![]() |
5a504fa711 | ||
![]() |
b4cce22415 | ||
![]() |
54c2306637 | ||
![]() |
bc8f5f484d | ||
![]() |
686384ebb7 | ||
![]() |
3a85c4d367 | ||
![]() |
5a7f669505 | ||
![]() |
0c13d9da15 | ||
![]() |
58ec26ee89 | ||
![]() |
969bcf17c4 | ||
![]() |
04d81a0e5c | ||
![]() |
a6e99525ac | ||
![]() |
7e95b3501d | ||
![]() |
3435d95c80 | ||
![]() |
acaab7a3de | ||
![]() |
74ba452025 | ||
![]() |
500be2de58 | ||
![]() |
78eba97bf9 | ||
![]() |
6350d528a7 | ||
![]() |
42eb6b9e01 | ||
![]() |
2e2fb68715 | ||
![]() |
6fc6355d66 | ||
![]() |
48fc93bbdc |
@@ -3,6 +3,7 @@
|
||||
// development
|
||||
integration_test.go
|
||||
integration_test/
|
||||
!integration_test/etc_embedded_derp/tls/server.crt
|
||||
|
||||
Dockerfile*
|
||||
docker-compose*
|
||||
|
10
.github/CODEOWNERS
vendored
Normal file
10
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
* @juanfont @kradalby
|
||||
|
||||
*.md @ohdearaugustin
|
||||
*.yml @ohdearaugustin
|
||||
*.yaml @ohdearaugustin
|
||||
Dockerfile* @ohdearaugustin
|
||||
.goreleaser.yaml @ohdearaugustin
|
||||
/docs/ @ohdearaugustin
|
||||
/.github/workflows/ @ohdearaugustin
|
||||
/.github/renovate.json @ohdearaugustin
|
2
.github/FUNDING.yml
vendored
Normal file
2
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
ko_fi: kradalby
|
||||
github: [kradalby]
|
30
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
30
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
---
|
||||
name: "Bug report"
|
||||
about: "Create a bug report to help us improve"
|
||||
title: ""
|
||||
labels: ["bug"]
|
||||
assignees: ""
|
||||
---
|
||||
|
||||
<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the bug report in this language. -->
|
||||
|
||||
**Bug description**
|
||||
|
||||
<!-- A clear and concise description of what the bug is. Describe the expected bahavior
|
||||
and how it is currently different. If you are unsure if it is a bug, consider discussing
|
||||
it on our Discord server first. -->
|
||||
|
||||
**To Reproduce**
|
||||
|
||||
<!-- Steps to reproduce the behavior. -->
|
||||
|
||||
**Context info**
|
||||
|
||||
<!-- Please add relevant information about your system. For example:
|
||||
- Version of headscale used
|
||||
- Version of tailscale client
|
||||
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
|
||||
- Kernel version
|
||||
- The relevant config parameters you used
|
||||
- Log output
|
||||
-->
|
11
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
11
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Issues must have some content
|
||||
blank_issues_enabled: false
|
||||
|
||||
# Contact links
|
||||
contact_links:
|
||||
- name: "headscale usage documentation"
|
||||
url: "https://github.com/juanfont/headscale/blob/main/docs"
|
||||
about: "Find documentation about how to configure and run headscale."
|
||||
- name: "headscale Discord community"
|
||||
url: "https://discord.gg/xGj2TuqyxY"
|
||||
about: "Please ask and answer questions about usage of headscale here."
|
17
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
17
.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
---
|
||||
name: "Feature request"
|
||||
about: "Suggest an idea for headscale"
|
||||
title: ""
|
||||
labels: ["enhancement"]
|
||||
assignees: ""
|
||||
---
|
||||
|
||||
<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the feature request in this language. -->
|
||||
|
||||
**Feature request**
|
||||
|
||||
<!-- A clear and precise description of what new or changed feature you want. -->
|
||||
|
||||
<!-- Please include the reason, why you would need the feature. E.g. what problem
|
||||
does it solve? Or which workflow is currently frustrating and will be improved by
|
||||
this? -->
|
30
.github/ISSUE_TEMPLATE/other_issue.md
vendored
Normal file
30
.github/ISSUE_TEMPLATE/other_issue.md
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
---
|
||||
name: "Other issue"
|
||||
about: "Report a different issue"
|
||||
title: ""
|
||||
labels: ["bug"]
|
||||
assignees: ""
|
||||
---
|
||||
|
||||
<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the issue in this language. -->
|
||||
|
||||
<!-- If you have a question, please consider using our Discord for asking questions -->
|
||||
|
||||
**Issue description**
|
||||
|
||||
<!-- Please add your issue description. -->
|
||||
|
||||
**To Reproduce**
|
||||
|
||||
<!-- Steps to reproduce the behavior. -->
|
||||
|
||||
**Context info**
|
||||
|
||||
<!-- Please add relevant information about your system. For example:
|
||||
- Version of headscale used
|
||||
- Version of tailscale client
|
||||
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
|
||||
- Kernel version
|
||||
- The relevant config parameters you used
|
||||
- Log output
|
||||
-->
|
10
.github/pull_request_template.md
vendored
Normal file
10
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
<!-- Please tick if the following things apply. You… -->
|
||||
|
||||
- [ ] read the [CONTRIBUTING guidelines](README.md#contributing)
|
||||
- [ ] raised a GitHub issue or discussed it on the projects chat beforehand
|
||||
- [ ] added unit tests
|
||||
- [ ] added integration tests
|
||||
- [ ] updated documentation if needed
|
||||
- [ ] updated CHANGELOG.md
|
||||
|
||||
<!-- If applicable, please reference the issue using `Fixes #XXX` and add tests to cover your new code. -->
|
38
.github/renovate.json
vendored
Normal file
38
.github/renovate.json
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
{
|
||||
"baseBranches": ["main"],
|
||||
"username": "renovate-release",
|
||||
"gitAuthor": "Renovate Bot <bot@renovateapp.com>",
|
||||
"branchPrefix": "renovateaction/",
|
||||
"onboarding": false,
|
||||
"extends": ["config:base", ":rebaseStalePrs"],
|
||||
"ignorePresets": [":prHourlyLimit2"],
|
||||
"enabledManagers": ["dockerfile", "gomod", "github-actions","regex" ],
|
||||
"includeForks": true,
|
||||
"repositories": ["juanfont/headscale"],
|
||||
"platform": "github",
|
||||
"packageRules": [
|
||||
{
|
||||
"matchDatasources": ["go"],
|
||||
"groupName": "Go modules",
|
||||
"groupSlug": "gomod",
|
||||
"separateMajorMinor": false
|
||||
},
|
||||
{
|
||||
"matchDatasources": ["docker"],
|
||||
"groupName": "Dockerfiles",
|
||||
"groupSlug": "dockerfiles"
|
||||
}
|
||||
],
|
||||
"regexManagers": [
|
||||
{
|
||||
"fileMatch": [
|
||||
".github/workflows/.*.yml$"
|
||||
],
|
||||
"matchStrings": [
|
||||
"\\s*go-version:\\s*\"?(?<currentValue>.*?)\"?\\n"
|
||||
],
|
||||
"datasourceTemplate": "golang-version",
|
||||
"depNameTemplate": "actions/go-version"
|
||||
}
|
||||
]
|
||||
}
|
29
.github/workflows/build.yml
vendored
29
.github/workflows/build.yml
vendored
@@ -14,22 +14,29 @@ jobs:
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17.3"
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
go version
|
||||
sudo apt update
|
||||
sudo apt install -y make
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v14.1
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v16
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run build
|
||||
run: make build
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: nix build
|
||||
|
||||
- uses: actions/upload-artifact@v2
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: headscale-linux
|
||||
path: headscale
|
||||
path: result/bin/headscale
|
||||
|
15
.github/workflows/contributors.yml
vendored
15
.github/workflows/contributors.yml
vendored
@@ -4,13 +4,24 @@ on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
add-contributors:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: BobAnkh/add-contributors@master
|
||||
- name: Delete upstream contributor branch
|
||||
# Allow continue on failure to account for when the
|
||||
# upstream branch is deleted or does not exist.
|
||||
continue-on-error: true
|
||||
run: git push origin --delete update-contributors
|
||||
- name: Create up-to-date contributors branch
|
||||
run: git checkout -B update-contributors
|
||||
- name: Push empty contributors branch
|
||||
run: git push origin update-contributors
|
||||
- name: Switch back to main
|
||||
run: git checkout main
|
||||
- uses: BobAnkh/add-contributors@v0.2.2
|
||||
with:
|
||||
CONTRIBUTOR: "## Contributors"
|
||||
COLUMN_PER_ROW: "6"
|
||||
|
41
.github/workflows/lint.yml
vendored
41
.github/workflows/lint.yml
vendored
@@ -8,18 +8,57 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v14.1
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- name: golangci-lint
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
uses: golangci/golangci-lint-action@v2
|
||||
with:
|
||||
version: latest
|
||||
version: v1.46.1
|
||||
|
||||
# Only block PRs on new problems.
|
||||
# If this is not enabled, we will end up having PRs
|
||||
# blocked because new linters has appared and other
|
||||
# parts of the code is affected.
|
||||
only-new-issues: true
|
||||
|
||||
prettier-lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v14.1
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
**/*.md
|
||||
**/*.yml
|
||||
**/*.yaml
|
||||
**/*.ts
|
||||
**/*.js
|
||||
**/*.sass
|
||||
**/*.css
|
||||
**/*.scss
|
||||
**/*.html
|
||||
|
||||
- name: Prettify code
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
uses: creyD/prettier_action@v4.0
|
||||
with:
|
||||
prettier_options: >-
|
||||
|
79
.github/workflows/release.yml
vendored
79
.github/workflows/release.yml
vendored
@@ -18,7 +18,7 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.17
|
||||
go-version: 1.18.0
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
@@ -40,6 +40,8 @@ jobs:
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Set up QEMU for multiple platforms
|
||||
uses: docker/setup-qemu-action@master
|
||||
with:
|
||||
@@ -63,6 +65,7 @@ jobs:
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=semver,pattern={{major}}
|
||||
type=raw,value=latest
|
||||
type=sha
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
@@ -86,6 +89,8 @@ jobs:
|
||||
platforms: linux/amd64,linux/arm64
|
||||
cache-from: type=local,src=/tmp/.buildx-cache
|
||||
cache-to: type=local,dest=/tmp/.buildx-cache-new
|
||||
build-args: |
|
||||
VERSION=${{ steps.meta.outputs.version }}
|
||||
- name: Prepare cache for next build
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache
|
||||
@@ -98,6 +103,8 @@ jobs:
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Set up QEMU for multiple platforms
|
||||
uses: docker/setup-qemu-action@master
|
||||
with:
|
||||
@@ -148,7 +155,75 @@ jobs:
|
||||
platforms: linux/amd64,linux/arm64
|
||||
cache-from: type=local,src=/tmp/.buildx-cache-debug
|
||||
cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
|
||||
build-args: |
|
||||
VERSION=${{ steps.meta-debug.outputs.version }}
|
||||
- name: Prepare cache for next build
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache
|
||||
rm -rf /tmp/.buildx-cache-debug
|
||||
mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug
|
||||
|
||||
docker-alpine-release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Set up QEMU for multiple platforms
|
||||
uses: docker/setup-qemu-action@master
|
||||
with:
|
||||
platforms: arm64,amd64
|
||||
- name: Cache Docker layers
|
||||
uses: actions/cache@v2
|
||||
with:
|
||||
path: /tmp/.buildx-cache-alpine
|
||||
key: ${{ runner.os }}-buildx-alpine-${{ github.sha }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-buildx-alpine-
|
||||
- name: Docker meta
|
||||
id: meta-alpine
|
||||
uses: docker/metadata-action@v3
|
||||
with:
|
||||
# list of Docker images to use as base name for tags
|
||||
images: |
|
||||
${{ secrets.DOCKERHUB_USERNAME }}/headscale
|
||||
ghcr.io/${{ github.repository_owner }}/headscale
|
||||
flavor: |
|
||||
latest=false
|
||||
tags: |
|
||||
type=semver,pattern={{version}}-alpine
|
||||
type=semver,pattern={{major}}.{{minor}}-alpine
|
||||
type=semver,pattern={{major}}-alpine
|
||||
type=raw,value=latest-alpine
|
||||
type=sha,suffix=-alpine
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Build and push
|
||||
id: docker_build
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
push: true
|
||||
context: .
|
||||
file: Dockerfile.alpine
|
||||
tags: ${{ steps.meta-alpine.outputs.tags }}
|
||||
labels: ${{ steps.meta-alpine.outputs.labels }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
cache-from: type=local,src=/tmp/.buildx-cache-alpine
|
||||
cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
|
||||
build-args: |
|
||||
VERSION=${{ steps.meta-alpine.outputs.version }}
|
||||
- name: Prepare cache for next build
|
||||
run: |
|
||||
rm -rf /tmp/.buildx-cache-alpine
|
||||
mv /tmp/.buildx-cache-alpine-new /tmp/.buildx-cache-alpine
|
||||
|
27
.github/workflows/renovatebot.yml
vendored
Normal file
27
.github/workflows/renovatebot.yml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: Renovate
|
||||
on:
|
||||
schedule:
|
||||
- cron: "* * 5,20 * *" # Every 5th and 20th of the month
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
renovate:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Get token
|
||||
id: get_token
|
||||
uses: machine-learning-apps/actions-app-token@master
|
||||
with:
|
||||
APP_PEM: ${{ secrets.RENOVATEBOT_SECRET }}
|
||||
APP_ID: ${{ secrets.RENOVATEBOT_APP_ID }}
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2.0.0
|
||||
|
||||
- name: Self-hosted Renovate
|
||||
uses: renovatebot/github-action@v31.81.3
|
||||
with:
|
||||
configurationFile: .github/renovate.json
|
||||
token: "x-access-token:${{ steps.get_token.outputs.app_token }}"
|
||||
# env:
|
||||
# LOG_LEVEL: "debug"
|
32
.github/workflows/test-integration.yml
vendored
32
.github/workflows/test-integration.yml
vendored
@@ -3,21 +3,33 @@ name: CI
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
# The "build" workflow
|
||||
integration-test:
|
||||
# The type of runner that the job will run on
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
# Steps represent a sequence of tasks that will be executed as part of the job
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
# Setup Go
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17.3"
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v14.1
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v16
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run Integration tests
|
||||
run: go test -tags integration -timeout 30m
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
uses: nick-fields/retry@v2
|
||||
with:
|
||||
timeout_minutes: 240
|
||||
max_attempts: 5
|
||||
retry_on: error
|
||||
command: nix develop --command -- make test_integration
|
||||
|
35
.github/workflows/test.yml
vendored
35
.github/workflows/test.yml
vendored
@@ -3,31 +3,28 @@ name: CI
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
# The "build" workflow
|
||||
test:
|
||||
# The type of runner that the job will run on
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
# Steps represent a sequence of tasks that will be executed as part of the job
|
||||
steps:
|
||||
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
# Setup Go
|
||||
- name: Setup Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.17.3" # The Go version to download (if necessary) and use.
|
||||
fetch-depth: 2
|
||||
|
||||
# Install all the dependencies
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
go version
|
||||
sudo apt update
|
||||
sudo apt install -y make
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v14.1
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v16
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run tests
|
||||
run: make test
|
||||
|
||||
- name: Run build
|
||||
run: make
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: nix develop --check
|
||||
|
8
.gitignore
vendored
8
.gitignore
vendored
@@ -17,6 +17,8 @@
|
||||
/headscale
|
||||
config.json
|
||||
config.yaml
|
||||
derp.yaml
|
||||
*.hujson
|
||||
*.key
|
||||
/db.sqlite
|
||||
*.sqlite3
|
||||
@@ -25,3 +27,9 @@ config.yaml
|
||||
.idea
|
||||
|
||||
test_output/
|
||||
|
||||
# Nix build output
|
||||
result
|
||||
.direnv/
|
||||
|
||||
integration_test/etc/config.dump.yaml
|
||||
|
@@ -24,11 +24,14 @@ linters:
|
||||
- tagliatelle
|
||||
- godox
|
||||
- ireturn
|
||||
- execinquery
|
||||
- exhaustruct
|
||||
|
||||
# We should strive to enable these:
|
||||
- wrapcheck
|
||||
- dupl
|
||||
- makezero
|
||||
- maintidx
|
||||
|
||||
# We might want to enable this, but it might be a lot of work
|
||||
- cyclop
|
||||
@@ -48,6 +51,7 @@ linters-settings:
|
||||
- ip
|
||||
- ok
|
||||
- c
|
||||
- tt
|
||||
|
||||
gocritic:
|
||||
disabled-checks:
|
||||
|
@@ -1,8 +1,7 @@
|
||||
# This is an example .goreleaser.yml file with some sane defaults.
|
||||
# Make sure to check the documentation at http://goreleaser.com
|
||||
---
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go mod tidy -compat=1.18
|
||||
|
||||
release:
|
||||
prerelease: auto
|
||||
@@ -11,63 +10,52 @@ builds:
|
||||
- id: darwin-amd64
|
||||
main: ./cmd/headscale/headscale.go
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- darwin
|
||||
goarch:
|
||||
- amd64
|
||||
env:
|
||||
- PKG_CONFIG_SYSROOT_DIR=/sysroot/macos/amd64
|
||||
- PKG_CONFIG_PATH=/sysroot/macos/amd64/usr/local/lib/pkgconfig
|
||||
- CC=o64-clang
|
||||
- CXX=o64-clang++
|
||||
flags:
|
||||
- -mod=readonly
|
||||
ldflags:
|
||||
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
|
||||
|
||||
- id: linux-armhf
|
||||
- id: darwin-arm64
|
||||
main: ./cmd/headscale/headscale.go
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- arm
|
||||
goarm:
|
||||
- 7
|
||||
env:
|
||||
- CC=arm-linux-gnueabihf-gcc
|
||||
- CXX=arm-linux-gnueabihf-g++
|
||||
- CGO_FLAGS=--sysroot=/sysroot/linux/armhf
|
||||
- CGO_LDFLAGS=--sysroot=/sysroot/linux/armhf
|
||||
- PKG_CONFIG_SYSROOT_DIR=/sysroot/linux/armhf
|
||||
- PKG_CONFIG_PATH=/sysroot/linux/armhf/opt/vc/lib/pkgconfig:/sysroot/linux/armhf/usr/lib/arm-linux-gnueabihf/pkgconfig:/sysroot/linux/armhf/usr/lib/pkgconfig:/sysroot/linux/armhf/usr/local/lib/pkgconfig
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- darwin
|
||||
goarch:
|
||||
- arm64
|
||||
flags:
|
||||
- -mod=readonly
|
||||
ldflags:
|
||||
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
|
||||
|
||||
- id: linux-amd64
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
main: ./cmd/headscale/headscale.go
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
ldflags:
|
||||
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
|
||||
|
||||
- id: linux-arm64
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- CC=aarch64-linux-gnu-gcc
|
||||
main: ./cmd/headscale/headscale.go
|
||||
mod_timestamp: "{{ .CommitTimestamp }}"
|
||||
ldflags:
|
||||
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
|
||||
|
||||
@@ -75,7 +63,7 @@ archives:
|
||||
- id: golang-cross
|
||||
builds:
|
||||
- darwin-amd64
|
||||
- linux-armhf
|
||||
- darwin-arm64
|
||||
- linux-amd64
|
||||
- linux-arm64
|
||||
name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
|
||||
|
175
CHANGELOG.md
Normal file
175
CHANGELOG.md
Normal file
@@ -0,0 +1,175 @@
|
||||
# CHANGELOG
|
||||
|
||||
## 0.17.0 (2022-xx-xx)
|
||||
|
||||
## 0.16.0 (2022-07-25)
|
||||
|
||||
**Note:** Take a backup of your database before upgrading.
|
||||
|
||||
### BREAKING
|
||||
|
||||
- Old ACL syntax is no longer supported ("users" & "ports" -> "src" & "dst"). Please check [the new syntax](https://tailscale.com/kb/1018/acls/).
|
||||
|
||||
### Changes
|
||||
|
||||
- **Drop** armhf (32-bit ARM) support. [#609](https://github.com/juanfont/headscale/pull/609)
|
||||
- Headscale fails to serve if the ACL policy file cannot be parsed [#537](https://github.com/juanfont/headscale/pull/537)
|
||||
- Fix labels cardinality error when registering unknown pre-auth key [#519](https://github.com/juanfont/headscale/pull/519)
|
||||
- Fix send on closed channel crash in polling [#542](https://github.com/juanfont/headscale/pull/542)
|
||||
- Fixed spurious calls to setLastStateChangeToNow from ephemeral nodes [#566](https://github.com/juanfont/headscale/pull/566)
|
||||
- Add command for moving nodes between namespaces [#362](https://github.com/juanfont/headscale/issues/362)
|
||||
- Added more configuration parameters for OpenID Connect (scopes, free-form paramters, domain and user allowlist)
|
||||
- Add command to set tags on a node [#525](https://github.com/juanfont/headscale/issues/525)
|
||||
- Add command to view tags of nodes [#356](https://github.com/juanfont/headscale/issues/356)
|
||||
- Add --all (-a) flag to enable routes command [#360](https://github.com/juanfont/headscale/issues/360)
|
||||
- Fix issue where nodes was not updated across namespaces [#560](https://github.com/juanfont/headscale/pull/560)
|
||||
- Add the ability to rename a nodes name [#560](https://github.com/juanfont/headscale/pull/560)
|
||||
- Node DNS names are now unique, a random suffix will be added when a node joins
|
||||
- This change contains database changes, remember to **backup** your database before upgrading
|
||||
- Add option to enable/disable logtail (Tailscale's logging infrastructure) [#596](https://github.com/juanfont/headscale/pull/596)
|
||||
- This change disables the logs by default
|
||||
- Use [Prometheus]'s duration parser, supporting days (`d`), weeks (`w`) and years (`y`) [#598](https://github.com/juanfont/headscale/pull/598)
|
||||
- Add support for reloading ACLs with SIGHUP [#601](https://github.com/juanfont/headscale/pull/601)
|
||||
- Use new ACL syntax [#618](https://github.com/juanfont/headscale/pull/618)
|
||||
- Add -c option to specify config file from command line [#285](https://github.com/juanfont/headscale/issues/285) [#612](https://github.com/juanfont/headscale/pull/601)
|
||||
- Add configuration option to allow Tailscale clients to use a random WireGuard port. [kb/1181/firewalls](https://tailscale.com/kb/1181/firewalls) [#624](https://github.com/juanfont/headscale/pull/624)
|
||||
- Improve obtuse UX regarding missing configuration (`ephemeral_node_inactivity_timeout` not set) [#639](https://github.com/juanfont/headscale/pull/639)
|
||||
- Fix nodes being shown as 'offline' in `tailscale status` [#648](https://github.com/juanfont/headscale/pull/648)
|
||||
- Improve shutdown behaviour [#651](https://github.com/juanfont/headscale/pull/651)
|
||||
- Drop Gin as web framework in Headscale [648](https://github.com/juanfont/headscale/pull/648) [677](https://github.com/juanfont/headscale/pull/677)
|
||||
- Make tailnet node updates check interval configurable [#675](https://github.com/juanfont/headscale/pull/675)
|
||||
- Fix regression with HTTP API [#684](https://github.com/juanfont/headscale/pull/684)
|
||||
- nodes ls now print both Hostname and Name(Issue [#647](https://github.com/juanfont/headscale/issues/647) PR [#687](https://github.com/juanfont/headscale/pull/687))
|
||||
|
||||
## 0.15.0 (2022-03-20)
|
||||
|
||||
**Note:** Take a backup of your database before upgrading.
|
||||
|
||||
### BREAKING
|
||||
|
||||
- Boundaries between Namespaces has been removed and all nodes can communicate by default [#357](https://github.com/juanfont/headscale/pull/357)
|
||||
- To limit access between nodes, use [ACLs](./docs/acls.md).
|
||||
- `/metrics` is now a configurable host:port endpoint: [#344](https://github.com/juanfont/headscale/pull/344). You must update your `config.yaml` file to include:
|
||||
```yaml
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
```
|
||||
|
||||
### Features
|
||||
|
||||
- Add support for writing ACL files with YAML [#359](https://github.com/juanfont/headscale/pull/359)
|
||||
- Users can now use emails in ACL's groups [#372](https://github.com/juanfont/headscale/issues/372)
|
||||
- Add shorthand aliases for commands and subcommands [#376](https://github.com/juanfont/headscale/pull/376)
|
||||
- Add `/windows` endpoint for Windows configuration instructions + registry file download [#392](https://github.com/juanfont/headscale/pull/392)
|
||||
- Added embedded DERP (and STUN) server into Headscale [#388](https://github.com/juanfont/headscale/pull/388)
|
||||
|
||||
### Changes
|
||||
|
||||
- Fix a bug were the same IP could be assigned to multiple hosts if joined in quick succession [#346](https://github.com/juanfont/headscale/pull/346)
|
||||
- Simplify the code behind registration of machines [#366](https://github.com/juanfont/headscale/pull/366)
|
||||
- Nodes are now only written to database if they are registrated successfully
|
||||
- Fix a limitation in the ACLs that prevented users to write rules with `*` as source [#374](https://github.com/juanfont/headscale/issues/374)
|
||||
- Reduce the overhead of marshal/unmarshal for Hostinfo, routes and endpoints by using specific types in Machine [#371](https://github.com/juanfont/headscale/pull/371)
|
||||
- Apply normalization function to FQDN on hostnames when hosts registers and retrieve informations [#363](https://github.com/juanfont/headscale/issues/363)
|
||||
- Fix a bug that prevented the use of `tailscale logout` with OIDC [#508](https://github.com/juanfont/headscale/issues/508)
|
||||
- Added Tailscale repo HEAD and unstable releases channel to the integration tests targets [#513](https://github.com/juanfont/headscale/pull/513)
|
||||
|
||||
## 0.14.0 (2022-02-24)
|
||||
|
||||
**UPCOMING ### BREAKING
|
||||
From the **next\*\* version (`0.15.0`), all machines will be able to communicate regardless of
|
||||
if they are in the same namespace. This means that the behaviour currently limited to ACLs
|
||||
will become default. From version `0.15.0`, all limitation of communications must be done
|
||||
with ACLs.
|
||||
|
||||
This is a part of aligning `headscale`'s behaviour with Tailscale's upstream behaviour.
|
||||
|
||||
### BREAKING
|
||||
|
||||
- ACLs have been rewritten to align with the bevaviour Tailscale Control Panel provides. **NOTE:** This is only active if you use ACLs
|
||||
- Namespaces are now treated as Users
|
||||
- All machines can communicate with all machines by default
|
||||
- Tags should now work correctly and adding a host to Headscale should now reload the rules.
|
||||
- The documentation have a [fictional example](docs/acls.md) that should cover some use cases of the ACLs features
|
||||
|
||||
### Features
|
||||
|
||||
- Add support for configurable mTLS [docs](docs/tls.md#configuring-mutual-tls-authentication-mtls) [#297](https://github.com/juanfont/headscale/pull/297)
|
||||
|
||||
### Changes
|
||||
|
||||
- Remove dependency on CGO (switch from CGO SQLite to pure Go) [#346](https://github.com/juanfont/headscale/pull/346)
|
||||
|
||||
**0.13.0 (2022-02-18):**
|
||||
|
||||
### Features
|
||||
|
||||
- Add IPv6 support to the prefix assigned to namespaces
|
||||
- Add API Key support
|
||||
- Enable remote control of `headscale` via CLI [docs](docs/remote-cli.md)
|
||||
- Enable HTTP API (beta, subject to change)
|
||||
- OpenID Connect users will be mapped per namespaces
|
||||
- Each user will get its own namespace, created if it does not exist
|
||||
- `oidc.domain_map` option has been removed
|
||||
- `strip_email_domain` option has been added (see [config-example.yaml](./config_example.yaml))
|
||||
|
||||
### Changes
|
||||
|
||||
- `ip_prefix` is now superseded by `ip_prefixes` in the configuration [#208](https://github.com/juanfont/headscale/pull/208)
|
||||
- Upgrade `tailscale` (1.20.4) and other dependencies to latest [#314](https://github.com/juanfont/headscale/pull/314)
|
||||
- fix swapped machine<->namespace labels in `/metrics` [#312](https://github.com/juanfont/headscale/pull/312)
|
||||
- remove key-value based update mechanism for namespace changes [#316](https://github.com/juanfont/headscale/pull/316)
|
||||
|
||||
## 0.12.4 (2022-01-29)

### Changes

- Make gRPC Unix socket permissions configurable [#292](https://github.com/juanfont/headscale/pull/292) (a configuration sketch follows this list)
- Trim whitespace before reading the private key from file [#289](https://github.com/juanfont/headscale/pull/289)
- Add a new command to generate a private key for `headscale` [#290](https://github.com/juanfont/headscale/pull/290)
- Fixed an issue where hosts deleted from the control server could be written back to the database while they were still connected to it [#278](https://github.com/juanfont/headscale/pull/278)
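A hedged example of the configurable gRPC socket permissions from #292. The key names are assumptions based on later configuration examples and may differ slightly in 0.12.4; consult the config-example.yaml of that release.

```yaml
# config.yaml (sketch): local gRPC socket used by the headscale CLI.
# Key names are assumptions -- check config-example.yaml for your release.
unix_socket: /var/run/headscale.sock
unix_socket_permission: "0770"   # e.g. let members of a "headscale" group run the CLI
```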
## 0.12.3 (2022-01-13)

### Changes

- Added Alpine container [#270](https://github.com/juanfont/headscale/pull/270)
- Minor updates in dependencies [#271](https://github.com/juanfont/headscale/pull/271)
## 0.12.2 (2022-01-11)

Happy New Year!

### Changes

- Fix Docker release [#258](https://github.com/juanfont/headscale/pull/258)
- Rewrite main docs [#262](https://github.com/juanfont/headscale/pull/262)
- Improve Docker docs [#263](https://github.com/juanfont/headscale/pull/263)
## 0.12.1 (2021-12-24)

(We are skipping 0.12.0 to correct a mishap made weeks ago with the version tagging)

### BREAKING

- Upgrade to Tailscale 1.18 [#229](https://github.com/juanfont/headscale/pull/229)
  - This change requires a new format for the private key; private keys are now generated automatically:
    1. Delete your current key
    2. Restart `headscale`; a new key will be generated
    3. Restart all Tailscale clients to fetch the new key

### Changes

- Unify configuration example [#197](https://github.com/juanfont/headscale/pull/197)
- Add stricter linting and formatting [#223](https://github.com/juanfont/headscale/pull/223)

### Features

- Add gRPC and HTTP API (HTTP API is currently disabled) [#204](https://github.com/juanfont/headscale/pull/204)
- Use gRPC between the CLI and the server [#206](https://github.com/juanfont/headscale/pull/206), [#212](https://github.com/juanfont/headscale/pull/212)
- Beta OpenID Connect support [#126](https://github.com/juanfont/headscale/pull/126), [#227](https://github.com/juanfont/headscale/pull/227)
## 0.11.0 (2021-10-25)

### BREAKING

- Make headscale fetch DERP map from URL and file [#196](https://github.com/juanfont/headscale/pull/196)
@@ -1,5 +1,6 @@
|
||||
# Builder image
|
||||
FROM golang:1.17.1-bullseye AS build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/golang:1.18.0-bullseye AS build
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
@@ -7,8 +8,8 @@ COPY go.mod go.sum /go/src/headscale/
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
|
||||
ARG TARGETOS TARGETARCH
|
||||
RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /go/bin/headscale -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
|
||||
RUN test -e /go/bin/headscale
|
||||
|
||||
# Production image
|
||||
|
Dockerfile.alpine (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Builder image
|
||||
FROM --platform=$BUILDPLATFORM docker.io/golang:1.18.0-alpine AS build
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
COPY go.mod go.sum /go/src/headscale/
|
||||
RUN apk add gcc musl-dev
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
ARG TARGETOS TARGETARCH
|
||||
RUN CGO_ENABLED=0 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /go/bin/headscale -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
|
||||
RUN test -e /go/bin/headscale
|
||||
|
||||
# Production image
|
||||
FROM docker.io/alpine:latest
|
||||
|
||||
COPY --from=build /go/bin/headscale /bin/headscale
|
||||
ENV TZ UTC
|
||||
|
||||
EXPOSE 8080/tcp
|
||||
CMD ["headscale"]
|
@@ -1,5 +1,6 @@
|
||||
# Builder image
|
||||
FROM golang:1.17.1-bullseye AS build
|
||||
FROM --platform=$BUILDPLATFORM docker.io/golang:1.18.0-bullseye AS build
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
@@ -8,7 +9,8 @@ RUN go mod download
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
|
||||
ARG TARGETOS TARGETARCH
|
||||
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
|
||||
RUN test -e /go/bin/headscale
|
||||
|
||||
# Debug image
|
||||
|
@@ -1,11 +1,17 @@
|
||||
FROM ubuntu:latest
|
||||
|
||||
ARG TAILSCALE_VERSION
|
||||
ARG TAILSCALE_VERSION=*
|
||||
ARG TAILSCALE_CHANNEL=stable
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y gnupg curl \
|
||||
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
|
||||
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
|
||||
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
|
||||
&& curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y tailscale=${TAILSCALE_VERSION} \
|
||||
&& apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
|
||||
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
|
||||
|
||||
RUN update-ca-certificates
|
||||
|
Dockerfile.tailscale-HEAD (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
FROM golang:latest
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y ca-certificates dnsutils git iptables \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
|
||||
RUN git clone https://github.com/tailscale/tailscale.git
|
||||
|
||||
WORKDIR tailscale
|
||||
|
||||
RUN sh build_dist.sh tailscale.com/cmd/tailscale
|
||||
RUN sh build_dist.sh tailscale.com/cmd/tailscaled
|
||||
|
||||
RUN cp tailscale /usr/local/bin/
|
||||
RUN cp tailscaled /usr/local/bin/
|
||||
|
||||
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
|
||||
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
|
||||
|
||||
RUN update-ca-certificates
|
Dockerfile.tmp-integration (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
# Builder image
|
||||
FROM docker.io/golang:1.18.0-bullseye AS build
|
||||
ARG VERSION=dev
|
||||
ENV GOPATH /go
|
||||
WORKDIR /go/src/headscale
|
||||
|
||||
COPY go.mod go.sum /go/src/headscale/
|
||||
RUN go mod download
|
||||
|
||||
COPY . .
|
||||
RUN CGO_ENABLED=0 go build -o /go/bin/headscale -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
|
||||
RUN test -e /go/bin/headscale
|
||||
|
||||
# Production image
|
||||
FROM gcr.io/distroless/base-debian11
|
||||
|
||||
COPY --from=build /go/bin/headscale /bin/headscale
|
||||
ENV TZ UTC
|
||||
|
||||
EXPOSE 8080/tcp
|
||||
CMD ["headscale"]
|
Makefile (20 changed lines)
@@ -1,8 +1,15 @@
|
||||
# Calculate version
|
||||
version = $(shell ./scripts/version-at-commit.sh)
|
||||
version ?= $(shell git describe --always --tags --dirty)
|
||||
|
||||
rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
|
||||
|
||||
# Determine if OS supports pie
|
||||
GOOS ?= $(shell uname | tr '[:upper:]' '[:lower:]')
|
||||
ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
|
||||
pieflags = -buildmode=pie
|
||||
else
|
||||
endif
|
||||
|
||||
# GO_SOURCES = $(wildcard *.go)
|
||||
# PROTO_SOURCES = $(wildcard **/*.proto)
|
||||
GO_SOURCES = $(call rwildcard,,*.go)
|
||||
@@ -10,7 +17,7 @@ PROTO_SOURCES = $(call rwildcard,,*.proto)
|
||||
|
||||
|
||||
build:
|
||||
go build -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go
|
||||
GOOS=$(GOOS) CGO_ENABLED=0 go build -trimpath $(pieflags) -mod=readonly -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go
|
||||
|
||||
dev: lint test build
|
||||
|
||||
@@ -18,11 +25,14 @@ test:
|
||||
@go test -coverprofile=coverage.out ./...
|
||||
|
||||
test_integration:
|
||||
go test -tags integration -timeout 30m ./...
|
||||
go test -failfast -tags integration -timeout 30m -count=1 ./...
|
||||
|
||||
test_integration_cli:
|
||||
go test -tags integration -v integration_cli_test.go integration_common_test.go
|
||||
|
||||
test_integration_derp:
|
||||
go test -tags integration -v integration_embedded_derp_test.go integration_common_test.go
|
||||
|
||||
coverprofile_func:
|
||||
go tool cover -func=coverage.out
|
||||
|
||||
@@ -38,14 +48,14 @@ fmt:
|
||||
clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i $(PROTO_SOURCES)
|
||||
|
||||
proto-lint:
|
||||
cd proto/ && buf lint
|
||||
cd proto/ && go run github.com/bufbuild/buf/cmd/buf lint
|
||||
|
||||
compress: build
|
||||
upx --brute headscale
|
||||
|
||||
generate:
|
||||
rm -rf gen
|
||||
buf generate proto
|
||||
go run github.com/bufbuild/buf/cmd/buf generate proto
|
||||
|
||||
install-protobuf-plugins:
|
||||
go install \
|
||||
|
README.md (449 changed lines)
@@ -2,38 +2,68 @@
|
||||
|
||||

|
||||
|
||||
An open source, self-hosted implementation of the Tailscale coordination server.
|
||||
An open source, self-hosted implementation of the Tailscale control server.
|
||||
|
||||
Join our [Discord](https://discord.gg/XcQxk2VHjx) server for a chat.
|
||||
Join our [Discord](https://discord.gg/c84AZQhmpx) server for a chat.
|
||||
|
||||
**Note:** Always select the same GitHub tag as the released version you use to ensure you have the correct example configuration and documentation. The `main` branch might contain unreleased changes.
|
||||
**Note:** Always select the same GitHub tag as the released version you use
|
||||
to ensure you have the correct example configuration and documentation.
|
||||
The `main` branch might contain unreleased changes.
|
||||
|
||||
## Overview
|
||||
## What is Tailscale
|
||||
|
||||
Tailscale is [a modern VPN](https://tailscale.com/) built on top of [Wireguard](https://www.wireguard.com/). It [works like an overlay network](https://tailscale.com/blog/how-tailscale-works/) between the computers of your networks - using all kinds of [NAT traversal sorcery](https://tailscale.com/blog/how-nat-traversal-works/).
|
||||
Tailscale is [a modern VPN](https://tailscale.com/) built on top of
|
||||
[Wireguard](https://www.wireguard.com/).
|
||||
It [works like an overlay network](https://tailscale.com/blog/how-tailscale-works/)
|
||||
between the computers of your networks - using
|
||||
[NAT traversal](https://tailscale.com/blog/how-nat-traversal-works/).
|
||||
|
||||
Everything in Tailscale is Open Source, except the GUI clients for proprietary OS (Windows and macOS/iOS), and the 'coordination/control server'.
|
||||
Everything in Tailscale is Open Source, except the GUI clients for proprietary OS
|
||||
(Windows and macOS/iOS), and the control server.
|
||||
|
||||
The control server works as an exchange point of Wireguard public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.
|
||||
The control server works as an exchange point of Wireguard public keys for the
|
||||
nodes in the Tailscale network. It assigns the IP addresses of the clients,
|
||||
creates the boundaries between each user, enables sharing machines between users,
|
||||
and exposes the advertised routes of your nodes.
|
||||
|
||||
headscale implements this coordination server.
|
||||
A [Tailscale network (tailnet)](https://tailscale.com/kb/1136/tailnet/) is the private
network which Tailscale assigns to a user, whether that user is a private individual or an
organisation.
|
||||
|
||||
## Status
|
||||
## Design goal
|
||||
|
||||
- [x] Base functionality (nodes can communicate with each other)
|
||||
- [x] Node registration through the web flow
|
||||
- [x] Network changes are relayed to the nodes
|
||||
- [x] Namespaces support (~tailnets in Tailscale.com naming)
|
||||
- [x] Routing (advertise & accept, including exit nodes)
|
||||
- [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
|
||||
- [x] JSON-formatted output
|
||||
- [x] ACLs
|
||||
- [x] Taildrop (File Sharing)
|
||||
- [x] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
|
||||
- [x] DNS (passing DNS servers to nodes)
|
||||
- [x] Single-Sign-On (via Open ID Connect)
|
||||
- [x] Share nodes between namespaces
|
||||
- [x] MagicDNS (see `docs/`)
|
||||
`headscale` aims to implement a self-hosted, open source alternative to the Tailscale
|
||||
control server. `headscale` has a narrower scope and an instance of `headscale`
|
||||
implements a _single_ Tailnet, which is typically what a single organisation, or
|
||||
home/personal setup would use.
|
||||
|
||||
`headscale` uses terms that map to Tailscale's control server; consult the
[glossary](./docs/glossary.md) for explanations.
|
||||
|
||||
## Support
|
||||
|
||||
If you like `headscale` and find it useful, there are sponsorship and donation
buttons available in the repo.
|
||||
|
||||
If you would like to sponsor features, bugs or prioritisation, reach out to
|
||||
one of the maintainers.
|
||||
|
||||
## Features
|
||||
|
||||
- Full "base" support of Tailscale's features
|
||||
- Configurable DNS
|
||||
- [Split DNS](https://tailscale.com/kb/1054/dns/#using-dns-settings-in-the-admin-console)
|
||||
- Node registration
|
||||
- Single-Sign-On (via Open ID Connect)
|
||||
- Pre authenticated key
|
||||
- Taildrop (File Sharing)
|
||||
- [Access control lists](https://tailscale.com/kb/1018/acls/)
|
||||
- [MagicDNS](https://tailscale.com/kb/1081/magicdns)
|
||||
- Support for multiple IP ranges in the tailnet
|
||||
- Dual stack (IPv4 and IPv6)
|
||||
- Routing advertising (including exit nodes)
|
||||
- Ephemeral nodes
|
||||
- Embedded [DERP server](https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp)
|
||||
|
||||
## Client OS support
|
||||
|
||||
@@ -41,15 +71,12 @@ headscale implements this coordination server.
|
||||
| ------- | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| Linux | Yes |
|
||||
| OpenBSD | Yes |
|
||||
| FreeBSD | Yes |
|
||||
| macOS | Yes (see `/apple` on your headscale for more information) |
|
||||
| Windows | Yes |
|
||||
| Windows | Yes [docs](./docs/windows-client.md) |
|
||||
| Android | [You need to compile the client yourself](https://github.com/juanfont/headscale/issues/58#issuecomment-885255270) |
|
||||
| iOS | Not yet |
|
||||
|
||||
## Roadmap 🤷
|
||||
|
||||
Suggestions/PRs welcomed!
|
||||
|
||||
## Running headscale
|
||||
|
||||
Please have a look at the documentation under [`docs/`](docs/).
|
||||
@@ -57,15 +84,23 @@ Please have a look at the documentation under [`docs/`](docs/).
|
||||
## Disclaimer
|
||||
|
||||
1. We have nothing to do with Tailscale, or Tailscale Inc.
|
||||
2. The purpose of writing this was to learn how Tailscale works.
|
||||
2. The purpose of Headscale is maintaining a working, self-hosted Tailscale control panel.
|
||||
|
||||
## Contributing
|
||||
|
||||
To contribute to Headscale you would need the lastest version of [Go](https://golang.org) and [Buf](https://buf.build)(Protobuf generator).
|
||||
To contribute to headscale you would need the lastest version of [Go](https://golang.org)
|
||||
and [Buf](https://buf.build)(Protobuf generator).
|
||||
|
||||
We recommend using [Nix](https://nixos.org/) to setup a development environment. This can
|
||||
be done with `nix develop`, which will install the tools and give you a shell.
|
||||
This guarantees that you will have the same dev env as `headscale` maintainers.
|
||||
|
||||
PRs and suggestions are welcome.
|
||||
|
||||
### Code style
|
||||
|
||||
To ensure we have some consistency with a growing number of contributes, this project has adopted linting and style/formatting rules:
|
||||
To ensure we have some consistency with a growing number of contributions,
|
||||
this project has adopted linting and style/formatting rules:
|
||||
|
||||
The **Go** code is linted with [`golangci-lint`](https://golangci-lint.run) and
|
||||
formatted with [`golines`](https://github.com/segmentio/golines) (width 88) and
|
||||
@@ -76,7 +111,7 @@ run `make lint` and `make fmt` before committing any code.
|
||||
The **Proto** code is linted with [`buf`](https://docs.buf.build/lint/overview) and
|
||||
formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).
|
||||
|
||||
The **rest** (markdown, yaml, etc) is formatted with [`prettier`](https://prettier.io).
|
||||
The **rest** (Markdown, YAML, etc) is formatted with [`prettier`](https://prettier.io).
|
||||
|
||||
Check out the `.golangci.yaml` and `Makefile` to see the specific configuration.
|
||||
|
||||
@@ -84,15 +119,18 @@ Check out the `.golangci.yaml` and `Makefile` to see the specific configuration.
|
||||
|
||||
- Go
|
||||
- Buf
|
||||
- Protobuf tools:
|
||||
- Protobuf tools
|
||||
|
||||
Install and activate:
|
||||
|
||||
```shell
|
||||
make install-protobuf-plugins
|
||||
nix develop
|
||||
```
|
||||
|
||||
### Testing and building
|
||||
|
||||
Some parts of the project requires the generation of Go code from Protobuf (if changes is made in `proto/`) and it must be (re-)generated with:
|
||||
Some parts of the project require the generation of Go code from Protobuf
|
||||
(if changes are made in `proto/`) and it must be (re-)generated with:
|
||||
|
||||
```shell
|
||||
make generate
|
||||
@@ -108,6 +146,12 @@ make test
|
||||
|
||||
To build the program:
|
||||
|
||||
```shell
|
||||
nix build
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```shell
|
||||
make build
|
||||
```
|
||||
@@ -116,6 +160,13 @@ make build
|
||||
|
||||
<table>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/kradalby>
|
||||
<img src=https://avatars.githubusercontent.com/u/98431?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kristoffer Dalby/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Kristoffer Dalby</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/juanfont>
|
||||
<img src=https://avatars.githubusercontent.com/u/181059?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Juan Font/>
|
||||
@@ -124,10 +175,10 @@ make build
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/kradalby>
|
||||
<img src=https://avatars.githubusercontent.com/u/98431?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Kristoffer Dalby/>
|
||||
<a href=https://github.com/restanrm>
|
||||
<img src=https://avatars.githubusercontent.com/u/4344371?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Adrien Raffin-Caboisse/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Kristoffer Dalby</b></sub>
|
||||
<sub style="font-size:14px"><b>Adrien Raffin-Caboisse</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -138,10 +189,40 @@ make build
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ohdearaugustin>
|
||||
<img src=https://avatars.githubusercontent.com/u/14001491?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ohdearaugustin/>
|
||||
<a href=https://github.com/huskyii>
|
||||
<img src=https://avatars.githubusercontent.com/u/5499746?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jiang Zhu/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>ohdearaugustin</b></sub>
|
||||
<sub style="font-size:14px"><b>Jiang Zhu</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/reynico>
|
||||
<img src=https://avatars.githubusercontent.com/u/715768?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Nico/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Nico</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/e-zk>
|
||||
<img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>e-zk</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/arch4ngel>
|
||||
<img src=https://avatars.githubusercontent.com/u/11574161?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Justin Angel/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Justin Angel</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ItalyPaleAle>
|
||||
<img src=https://avatars.githubusercontent.com/u/43508?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Alessandro (Ale) Segala/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Alessandro (Ale) Segala</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -151,6 +232,36 @@ make build
|
||||
<sub style="font-size:14px"><b>unreality</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/mpldr>
|
||||
<img src=https://avatars.githubusercontent.com/u/33086936?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Moritz Poldrack/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Moritz Poldrack</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ohdearaugustin>
|
||||
<img src=https://avatars.githubusercontent.com/u/14001491?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ohdearaugustin/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>ohdearaugustin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Niek>
|
||||
<img src=https://avatars.githubusercontent.com/u/213140?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Niek van der Maas/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Niek van der Maas</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/negbie>
|
||||
<img src=https://avatars.githubusercontent.com/u/20154956?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Eugen Biegler/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Eugen Biegler</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/qbit>
|
||||
<img src=https://avatars.githubusercontent.com/u/68368?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aaron Bieber/>
|
||||
@@ -158,6 +269,71 @@ make build
|
||||
<sub style="font-size:14px"><b>Aaron Bieber</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/iSchluff>
|
||||
<img src=https://avatars.githubusercontent.com/u/1429641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Anton Schubert/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Anton Schubert</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/fdelucchijr>
|
||||
<img src=https://avatars.githubusercontent.com/u/69133647?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Fernando De Lucchi/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Fernando De Lucchi</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/GrigoriyMikhalkin>
|
||||
<img src=https://avatars.githubusercontent.com/u/3637857?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=GrigoriyMikhalkin/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>GrigoriyMikhalkin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/hdhoang>
|
||||
<img src=https://avatars.githubusercontent.com/u/12537?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Hoàng Đức Hiếu/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Hoàng Đức Hiếu</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/bravechamp>
|
||||
<img src=https://avatars.githubusercontent.com/u/48980452?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=bravechamp/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>bravechamp</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/deonthomasgy>
|
||||
<img src=https://avatars.githubusercontent.com/u/150036?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Deon Thomas/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Deon Thomas</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ChibangLW>
|
||||
<img src=https://avatars.githubusercontent.com/u/22293464?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ChibangLW/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>ChibangLW</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/mevansam>
|
||||
<img src=https://avatars.githubusercontent.com/u/403630?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Mevan Samaratunga/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Mevan Samaratunga</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/dragetd>
|
||||
<img src=https://avatars.githubusercontent.com/u/3639577?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Michael G./>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Michael G.</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
@@ -167,6 +343,27 @@ make build
|
||||
<sub style="font-size:14px"><b>Paul Tötterman</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/samson4649>
|
||||
<img src=https://avatars.githubusercontent.com/u/12725953?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Samuel Lock/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Samuel Lock</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/majst01>
|
||||
<img src=https://avatars.githubusercontent.com/u/410110?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Stefan Majer/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Stefan Majer</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/artemklevtsov>
|
||||
<img src=https://avatars.githubusercontent.com/u/603798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Artem Klevtsov/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Artem Klevtsov</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/cmars>
|
||||
<img src=https://avatars.githubusercontent.com/u/23741?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Casey Marshall/>
|
||||
@@ -174,6 +371,15 @@ make build
|
||||
<sub style="font-size:14px"><b>Casey Marshall</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/pvinis>
|
||||
<img src=https://avatars.githubusercontent.com/u/100233?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pavlos Vinieratos/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Pavlos Vinieratos</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/SilverBut>
|
||||
<img src=https://avatars.githubusercontent.com/u/6560655?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Silver Bullet/>
|
||||
@@ -181,6 +387,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Silver Bullet</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/lachy2849>
|
||||
<img src=https://avatars.githubusercontent.com/u/98844035?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lachy2849/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>lachy2849</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/t56k>
|
||||
<img src=https://avatars.githubusercontent.com/u/12165422?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=thomas/>
|
||||
@@ -188,6 +401,29 @@ make build
|
||||
<sub style="font-size:14px"><b>thomas</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/aberoham>
|
||||
<img src=https://avatars.githubusercontent.com/u/586805?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Abraham Ingersoll/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Abraham Ingersoll</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/apognu>
|
||||
<img src=https://avatars.githubusercontent.com/u/3017182?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Antoine POPINEAU/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Antoine POPINEAU</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/aofei>
|
||||
<img src=https://avatars.githubusercontent.com/u/5037285?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Aofei Sheng/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Aofei Sheng</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/awoimbee>
|
||||
<img src=https://avatars.githubusercontent.com/u/22431493?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Arthur Woimbée/>
|
||||
@@ -195,6 +431,27 @@ make build
|
||||
<sub style="font-size:14px"><b>Arthur Woimbée</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/stensonb>
|
||||
<img src=https://avatars.githubusercontent.com/u/933389?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Bryan Stenson/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Bryan Stenson</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/yangchuansheng>
|
||||
<img src=https://avatars.githubusercontent.com/u/15308462?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt= Carson Yang/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b> Carson Yang</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/kundel>
|
||||
<img src=https://avatars.githubusercontent.com/u/10158899?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=kundel/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>kundel</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/fkr>
|
||||
<img src=https://avatars.githubusercontent.com/u/51063?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Kronlage-Dammers/>
|
||||
@@ -202,8 +459,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Felix Kronlage-Dammers</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/felixonmars>
|
||||
<img src=https://avatars.githubusercontent.com/u/1006477?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Felix Yan/>
|
||||
@@ -211,6 +466,59 @@ make build
|
||||
<sub style="font-size:14px"><b>Felix Yan</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/JJGadgets>
|
||||
<img src=https://avatars.githubusercontent.com/u/5709019?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=JJGadgets/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>JJGadgets</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/madjam002>
|
||||
<img src=https://avatars.githubusercontent.com/u/679137?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jamie Greeff/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Jamie Greeff</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/jimt>
|
||||
<img src=https://avatars.githubusercontent.com/u/180326?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Jim Tittsler/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Jim Tittsler</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/piec>
|
||||
<img src=https://avatars.githubusercontent.com/u/781471?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Pierre Carru/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Pierre Carru</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/rcursaru>
|
||||
<img src=https://avatars.githubusercontent.com/u/16259641?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=rcursaru/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>rcursaru</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/renovate-bot>
|
||||
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=WhiteSource Renovate/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>WhiteSource Renovate</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ryanfowler>
|
||||
<img src=https://avatars.githubusercontent.com/u/2668821?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ryan Fowler/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Ryan Fowler</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/shaananc>
|
||||
<img src=https://avatars.githubusercontent.com/u/2287839?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Shaanan Cohney/>
|
||||
@@ -218,6 +526,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Shaanan Cohney</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/m-tanner-dev0>
|
||||
<img src=https://avatars.githubusercontent.com/u/97977342?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tanner/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Tanner</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Teteros>
|
||||
<img src=https://avatars.githubusercontent.com/u/5067989?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Teteros/>
|
||||
@@ -239,6 +554,8 @@ make build
|
||||
<sub style="font-size:14px"><b>Tianon Gravi</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/woudsma>
|
||||
<img src=https://avatars.githubusercontent.com/u/6162978?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Tjerk Woudsma/>
|
||||
@@ -246,8 +563,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Tjerk Woudsma</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/y0ngb1n>
|
||||
<img src=https://avatars.githubusercontent.com/u/25719408?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Yang Bin/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Yang Bin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/zekker6>
|
||||
<img src=https://avatars.githubusercontent.com/u/1367798?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Zakhar Bessarab/>
|
||||
@@ -255,6 +577,13 @@ make build
|
||||
<sub style="font-size:14px"><b>Zakhar Bessarab</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Bpazy>
|
||||
<img src=https://avatars.githubusercontent.com/u/9838749?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ziyuan Han/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Ziyuan Han</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/derelm>
|
||||
<img src=https://avatars.githubusercontent.com/u/465155?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=derelm/>
|
||||
@@ -262,6 +591,15 @@ make build
|
||||
<sub style="font-size:14px"><b>derelm</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/nning>
|
||||
<img src=https://avatars.githubusercontent.com/u/557430?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=henning mueller/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>henning mueller</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ignoramous>
|
||||
<img src=https://avatars.githubusercontent.com/u/852289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ignoramous/>
|
||||
@@ -269,6 +607,27 @@ make build
|
||||
<sub style="font-size:14px"><b>ignoramous</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/lion24>
|
||||
<img src=https://avatars.githubusercontent.com/u/1382102?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=lion24/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>lion24</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/pernila>
|
||||
<img src=https://avatars.githubusercontent.com/u/12460060?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=pernila/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>pernila</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Wakeful-Cloud>
|
||||
<img src=https://avatars.githubusercontent.com/u/38930607?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Wakeful-Cloud/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>Wakeful-Cloud</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/xpzouying>
|
||||
<img src=https://avatars.githubusercontent.com/u/3946563?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=zy/>
|
||||
|
acls.go (427 changed lines)
@@ -2,38 +2,65 @@ package headscale
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/tailscale/hujson"
|
||||
"gopkg.in/yaml.v3"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const (
|
||||
errEmptyPolicy = Error("empty policy")
|
||||
errInvalidAction = Error("invalid action")
|
||||
errInvalidUserSection = Error("invalid user section")
|
||||
errInvalidGroup = Error("invalid group")
|
||||
errInvalidTag = Error("invalid tag")
|
||||
errInvalidNamespace = Error("invalid namespace")
|
||||
errInvalidPortFormat = Error("invalid port format")
|
||||
errEmptyPolicy = Error("empty policy")
|
||||
errInvalidAction = Error("invalid action")
|
||||
errInvalidGroup = Error("invalid group")
|
||||
errInvalidTag = Error("invalid tag")
|
||||
errInvalidPortFormat = Error("invalid port format")
|
||||
errWildcardIsNeeded = Error("wildcard as port is required for the protocol")
|
||||
)
|
||||
|
||||
const (
|
||||
Base8 = 8
|
||||
Base10 = 10
|
||||
BitSize16 = 16
|
||||
BitSize32 = 32
|
||||
BitSize64 = 64
|
||||
portRangeBegin = 0
|
||||
portRangeEnd = 65535
|
||||
expectedTokenItems = 2
|
||||
)
|
||||
|
||||
// For some reason golang.org/x/net/internal/iana is an internal package.
|
||||
const (
|
||||
protocolICMP = 1 // Internet Control Message
|
||||
protocolIGMP = 2 // Internet Group Management
|
||||
protocolIPv4 = 4 // IPv4 encapsulation
|
||||
protocolTCP = 6 // Transmission Control
|
||||
protocolEGP = 8 // Exterior Gateway Protocol
|
||||
protocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP)
|
||||
protocolUDP = 17 // User Datagram
|
||||
protocolGRE = 47 // Generic Routing Encapsulation
|
||||
protocolESP = 50 // Encap Security Payload
|
||||
protocolAH = 51 // Authentication Header
|
||||
protocolIPv6ICMP = 58 // ICMP for IPv6
|
||||
protocolSCTP = 132 // Stream Control Transmission Protocol
|
||||
ProtocolFC = 133 // Fibre Channel
|
||||
)
|
||||
|
||||
// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules.
|
||||
func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
log.Debug().
|
||||
Str("func", "LoadACLPolicy").
|
||||
Str("path", path).
|
||||
Msg("Loading ACL policy from path")
|
||||
|
||||
policyFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -46,25 +73,51 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
ast, err := hujson.Parse(policyBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ast.Standardize()
|
||||
policyBytes = ast.Pack()
|
||||
err = json.Unmarshal(policyBytes, &policy)
|
||||
if err != nil {
|
||||
return err
|
||||
switch filepath.Ext(path) {
|
||||
case ".yml", ".yaml":
|
||||
log.Debug().
|
||||
Str("path", path).
|
||||
Bytes("file", policyBytes).
|
||||
Msg("Loading ACLs from YAML")
|
||||
|
||||
err := yaml.Unmarshal(policyBytes, &policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Interface("policy", policy).
|
||||
Msg("Loaded policy from YAML")
|
||||
|
||||
default:
|
||||
ast, err := hujson.Parse(policyBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ast.Standardize()
|
||||
policyBytes = ast.Pack()
|
||||
err = json.Unmarshal(policyBytes, &policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if policy.IsZero() {
|
||||
return errEmptyPolicy
|
||||
}
|
||||
|
||||
h.aclPolicy = &policy
|
||||
|
||||
return h.UpdateACLRules()
|
||||
}
|
||||
|
||||
func (h *Headscale) UpdateACLRules() error {
|
||||
rules, err := h.generateACLRules()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
|
||||
h.aclRules = rules
|
||||
|
||||
return nil
|
||||
@@ -73,32 +126,46 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
rules := []tailcfg.FilterRule{}
|
||||
|
||||
if h.aclPolicy == nil {
|
||||
return nil, errEmptyPolicy
|
||||
}
|
||||
|
||||
machines, err := h.ListMachines()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for index, acl := range h.aclPolicy.ACLs {
|
||||
if acl.Action != "accept" {
|
||||
return nil, errInvalidAction
|
||||
}
|
||||
|
||||
filterRule := tailcfg.FilterRule{}
|
||||
|
||||
srcIPs := []string{}
|
||||
for innerIndex, user := range acl.Users {
|
||||
srcs, err := h.generateACLPolicySrcIP(user)
|
||||
for innerIndex, src := range acl.Sources {
|
||||
srcs, err := h.generateACLPolicySrcIP(machines, *h.aclPolicy, src)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, User %d", index, innerIndex)
|
||||
Msgf("Error parsing ACL %d, Source %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
srcIPs = append(srcIPs, srcs...)
|
||||
}
|
||||
filterRule.SrcIPs = srcIPs
|
||||
|
||||
protocols, needsWildcard, err := parseProtocol(acl.Protocol)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
destPorts := []tailcfg.NetPortRange{}
|
||||
for innerIndex, ports := range acl.Ports {
|
||||
dests, err := h.generateACLPolicyDestPorts(ports)
|
||||
for innerIndex, dest := range acl.Destinations {
|
||||
dests, err := h.generateACLPolicyDest(machines, *h.aclPolicy, dest, needsWildcard)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, Port %d", index, innerIndex)
|
||||
Msgf("Error parsing ACL %d, Destination %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@@ -108,20 +175,28 @@ func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
rules = append(rules, tailcfg.FilterRule{
|
||||
SrcIPs: srcIPs,
|
||||
DstPorts: destPorts,
|
||||
IPProto: protocols,
|
||||
})
|
||||
}
|
||||
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicySrcIP(u string) ([]string, error) {
|
||||
return h.expandAlias(u)
|
||||
func (h *Headscale) generateACLPolicySrcIP(
|
||||
machines []Machine,
|
||||
aclPolicy ACLPolicy,
|
||||
src string,
|
||||
) ([]string, error) {
|
||||
return expandAlias(machines, aclPolicy, src, h.cfg.OIDC.StripEmaildomain)
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicyDestPorts(
|
||||
d string,
|
||||
func (h *Headscale) generateACLPolicyDest(
|
||||
machines []Machine,
|
||||
aclPolicy ACLPolicy,
|
||||
dest string,
|
||||
needsWildcard bool,
|
||||
) ([]tailcfg.NetPortRange, error) {
|
||||
tokens := strings.Split(d, ":")
|
||||
tokens := strings.Split(dest, ":")
|
||||
if len(tokens) < expectedTokenItems || len(tokens) > 3 {
|
||||
return nil, errInvalidPortFormat
|
||||
}
|
||||
@@ -139,11 +214,16 @@ func (h *Headscale) generateACLPolicyDestPorts(
|
||||
alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
|
||||
}
|
||||
|
||||
expanded, err := h.expandAlias(alias)
|
||||
expanded, err := expandAlias(
|
||||
machines,
|
||||
aclPolicy,
|
||||
alias,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ports, err := h.expandPorts(tokens[len(tokens)-1])
|
||||
ports, err := expandPorts(tokens[len(tokens)-1], needsWildcard)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -162,23 +242,83 @@ func (h *Headscale) generateACLPolicyDestPorts(
|
||||
return dests, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) expandAlias(alias string) ([]string, error) {
|
||||
// parseProtocol reads the proto field of the ACL and generates a list of
|
||||
// protocols that will be allowed, following the IANA IP protocol number
|
||||
// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
|
||||
//
|
||||
// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
|
||||
// as per Tailscale behaviour (see tailcfg.FilterRule).
|
||||
//
|
||||
// Also returns a boolean indicating if the protocol
|
||||
// requires all the destinations to use wildcard as port number (only TCP,
|
||||
// UDP and SCTP support specifying ports).
|
||||
func parseProtocol(protocol string) ([]int, bool, error) {
|
||||
switch protocol {
|
||||
case "":
|
||||
return []int{protocolICMP, protocolIPv6ICMP, protocolTCP, protocolUDP}, false, nil
|
||||
case "igmp":
|
||||
return []int{protocolIGMP}, true, nil
|
||||
case "ipv4", "ip-in-ip":
|
||||
return []int{protocolIPv4}, true, nil
|
||||
case "tcp":
|
||||
return []int{protocolTCP}, false, nil
|
||||
case "egp":
|
||||
return []int{protocolEGP}, true, nil
|
||||
case "igp":
|
||||
return []int{protocolIGP}, true, nil
|
||||
case "udp":
|
||||
return []int{protocolUDP}, false, nil
|
||||
case "gre":
|
||||
return []int{protocolGRE}, true, nil
|
||||
case "esp":
|
||||
return []int{protocolESP}, true, nil
|
||||
case "ah":
|
||||
return []int{protocolAH}, true, nil
|
||||
case "sctp":
|
||||
return []int{protocolSCTP}, false, nil
|
||||
case "icmp":
|
||||
return []int{protocolICMP, protocolIPv6ICMP}, true, nil
|
||||
|
||||
default:
|
||||
protocolNumber, err := strconv.Atoi(protocol)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
needsWildcard := protocolNumber != protocolTCP && protocolNumber != protocolUDP && protocolNumber != protocolSCTP
|
||||
|
||||
return []int{protocolNumber}, needsWildcard, nil
|
||||
}
|
||||
}
|
||||
|
||||
// expandalias has an input of either
|
||||
// - a namespace
|
||||
// - a group
|
||||
// - a tag
|
||||
// and transform these in IPAddresses.
|
||||
func expandAlias(
|
||||
machines []Machine,
|
||||
aclPolicy ACLPolicy,
|
||||
alias string,
|
||||
stripEmailDomain bool,
|
||||
) ([]string, error) {
|
||||
ips := []string{}
|
||||
if alias == "*" {
|
||||
return []string{"*"}, nil
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("alias", alias).
|
||||
Msg("Expanding")
|
||||
|
||||
if strings.HasPrefix(alias, "group:") {
|
||||
if _, ok := h.aclPolicy.Groups[alias]; !ok {
|
||||
return nil, errInvalidGroup
|
||||
namespaces, err := expandGroup(aclPolicy, alias, stripEmailDomain)
|
||||
if err != nil {
|
||||
return ips, err
|
||||
}
|
||||
ips := []string{}
|
||||
for _, n := range h.aclPolicy.Groups[alias] {
|
||||
nodes, err := h.ListMachinesInNamespace(n)
|
||||
if err != nil {
|
||||
return nil, errInvalidNamespace
|
||||
}
|
||||
for _, n := range namespaces {
|
||||
nodes := filterMachinesByNamespace(machines, n)
|
||||
for _, node := range nodes {
|
||||
ips = append(ips, node.IPAddress)
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -186,36 +326,38 @@ func (h *Headscale) expandAlias(alias string) ([]string, error) {
|
||||
}
|
||||
|
||||
if strings.HasPrefix(alias, "tag:") {
|
||||
if _, ok := h.aclPolicy.TagOwners[alias]; !ok {
|
||||
return nil, errInvalidTag
|
||||
}
|
||||
|
||||
// This will have HORRIBLE performance.
|
||||
// We need to change the data model to better store tags
|
||||
machines := []Machine{}
|
||||
if err := h.db.Where("registered").Find(&machines).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips := []string{}
|
||||
// check for forced tags
|
||||
for _, machine := range machines {
|
||||
hostinfo := tailcfg.Hostinfo{}
|
||||
if len(machine.HostInfo) != 0 {
|
||||
hi, err := machine.HostInfo.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(hi, &hostinfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if contains(machine.ForcedTags, alias) {
|
||||
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
|
||||
// find tag owners
|
||||
owners, err := expandTagOwners(aclPolicy, alias, stripEmailDomain)
|
||||
if err != nil {
|
||||
if errors.Is(err, errInvalidTag) {
|
||||
if len(ips) == 0 {
|
||||
return ips, fmt.Errorf(
|
||||
"%w. %v isn't owned by a TagOwner and no forced tags are defined",
|
||||
errInvalidTag,
|
||||
alias,
|
||||
)
|
||||
}
|
||||
|
||||
// FIXME: Check TagOwners allows this
|
||||
for _, t := range hostinfo.RequestTags {
|
||||
if alias[4:] == t {
|
||||
ips = append(ips, machine.IPAddress)
|
||||
return ips, nil
|
||||
} else {
|
||||
return ips, err
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
// filter out machines per tag owner
|
||||
for _, namespace := range owners {
|
||||
machines := filterMachinesByNamespace(machines, namespace)
|
||||
for _, machine := range machines {
|
||||
hi := machine.GetHostInfo()
|
||||
if contains(hi.RequestTags, alias) {
|
||||
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -223,44 +365,88 @@ func (h *Headscale) expandAlias(alias string) ([]string, error) {
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
n, err := h.GetNamespace(alias)
|
||||
if err == nil {
|
||||
nodes, err := h.ListMachinesInNamespace(n.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips := []string{}
|
||||
for _, n := range nodes {
|
||||
ips = append(ips, n.IPAddress)
|
||||
}
|
||||
// if alias is a namespace
|
||||
nodes := filterMachinesByNamespace(machines, alias)
|
||||
nodes = excludeCorrectlyTaggedNodes(aclPolicy, nodes, alias)
|
||||
|
||||
for _, n := range nodes {
|
||||
ips = append(ips, n.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
if len(ips) > 0 {
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
if h, ok := h.aclPolicy.Hosts[alias]; ok {
|
||||
// if alias is an host
|
||||
if h, ok := aclPolicy.Hosts[alias]; ok {
|
||||
return []string{h.String()}, nil
|
||||
}
|
||||
|
||||
// if alias is an IP
|
||||
ip, err := netaddr.ParseIP(alias)
|
||||
if err == nil {
|
||||
return []string{ip.String()}, nil
|
||||
}
|
||||
|
||||
// if alias is an CIDR
|
||||
cidr, err := netaddr.ParseIPPrefix(alias)
|
||||
if err == nil {
|
||||
return []string{cidr.String()}, nil
|
||||
}
|
||||
|
||||
return nil, errInvalidUserSection
|
||||
log.Warn().Msgf("No IPs found with the alias %v", alias)
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) expandPorts(portsStr string) (*[]tailcfg.PortRange, error) {
|
||||
// excludeCorrectlyTaggedNodes will remove from the list of input nodes the ones
|
||||
// that are correctly tagged since they should not be listed as being in the namespace
|
||||
// we assume in this function that we only have nodes from 1 namespace.
|
||||
func excludeCorrectlyTaggedNodes(
|
||||
aclPolicy ACLPolicy,
|
||||
nodes []Machine,
|
||||
namespace string,
|
||||
) []Machine {
|
||||
out := []Machine{}
|
||||
tags := []string{}
|
||||
for tag, ns := range aclPolicy.TagOwners {
|
||||
if contains(ns, namespace) {
|
||||
tags = append(tags, tag)
|
||||
}
|
||||
}
|
||||
// for each machine if tag is in tags list, don't append it.
|
||||
for _, machine := range nodes {
|
||||
hi := machine.GetHostInfo()
|
||||
|
||||
found := false
|
||||
for _, t := range hi.RequestTags {
|
||||
if contains(tags, t) {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
if len(machine.ForcedTags) > 0 {
|
||||
found = true
|
||||
}
|
||||
if !found {
|
||||
out = append(out, machine)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func expandPorts(portsStr string, needsWildcard bool) (*[]tailcfg.PortRange, error) {
	if portsStr == "*" {
		return &[]tailcfg.PortRange{
			{First: portRangeBegin, Last: portRangeEnd},
		}, nil
	}

	if needsWildcard {
		return nil, errWildcardIsNeeded
	}

	ports := []tailcfg.PortRange{}
	for _, portStr := range strings.Split(portsStr, ",") {
		rang := strings.Split(portStr, "-")

@@ -296,3 +482,82 @@ func (h *Headscale) expandPorts(portsStr string) (*[]tailcfg.PortRange, error) {

	return &ports, nil
}
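Because the interesting part of expandPorts sits outside this hunk, here is a test-style sketch of how it is meant to be called. It assumes it compiles inside the headscale package next to acls.go (fmt is already used there), and it does not assert the exact PortRange values produced by the skipped lines.

// A quick demonstration of the accepted destination-port syntax.
func demoExpandPorts() {
	// "*" always expands to the single full range {portRangeBegin, portRangeEnd}.
	all, err := expandPorts("*", false)
	fmt.Println(len(*all), err)

	// When the protocol cannot carry ports (needsWildcard == true),
	// anything other than "*" is rejected with errWildcardIsNeeded.
	_, err = expandPorts("443", true)
	fmt.Println(err)

	// Otherwise the string is a comma-separated list of single ports
	// and "low-high" ranges.
	some, err := expandPorts("22,80-443", false)
	fmt.Println(some, err)
}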
|
||||
|
||||
func filterMachinesByNamespace(machines []Machine, namespace string) []Machine {
|
||||
out := []Machine{}
|
||||
for _, machine := range machines {
|
||||
if machine.Namespace.Name == namespace {
|
||||
out = append(out, machine)
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// expandTagOwners will return a list of namespace. An owner can be either a namespace or a group
|
||||
// a group cannot be composed of groups.
|
||||
func expandTagOwners(
|
||||
aclPolicy ACLPolicy,
|
||||
tag string,
|
||||
stripEmailDomain bool,
|
||||
) ([]string, error) {
|
||||
var owners []string
|
||||
ows, ok := aclPolicy.TagOwners[tag]
|
||||
if !ok {
|
||||
return []string{}, fmt.Errorf(
|
||||
"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
|
||||
errInvalidTag,
|
||||
tag,
|
||||
)
|
||||
}
|
||||
for _, owner := range ows {
|
||||
if strings.HasPrefix(owner, "group:") {
|
||||
gs, err := expandGroup(aclPolicy, owner, stripEmailDomain)
|
||||
if err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
owners = append(owners, gs...)
|
||||
} else {
|
||||
owners = append(owners, owner)
|
||||
}
|
||||
}
|
||||
|
||||
return owners, nil
|
||||
}
|
||||
|
||||
// expandGroup will return the list of namespace inside the group
|
||||
// after some validation.
|
||||
func expandGroup(
|
||||
aclPolicy ACLPolicy,
|
||||
group string,
|
||||
stripEmailDomain bool,
|
||||
) ([]string, error) {
|
||||
outGroups := []string{}
|
||||
aclGroups, ok := aclPolicy.Groups[group]
|
||||
if !ok {
|
||||
return []string{}, fmt.Errorf(
|
||||
"group %v isn't registered. %w",
|
||||
group,
|
||||
errInvalidGroup,
|
||||
)
|
||||
}
|
||||
for _, group := range aclGroups {
|
||||
if strings.HasPrefix(group, "group:") {
|
||||
return []string{}, fmt.Errorf(
|
||||
"%w. A group cannot be composed of groups. https://tailscale.com/kb/1018/acls/#groups",
|
||||
errInvalidGroup,
|
||||
)
|
||||
}
|
||||
grp, err := NormalizeToFQDNRules(group, stripEmailDomain)
|
||||
if err != nil {
|
||||
return []string{}, fmt.Errorf(
|
||||
"failed to normalize group %q, err: %w",
|
||||
group,
|
||||
errInvalidGroup,
|
||||
)
|
||||
}
|
||||
outGroups = append(outGroups, grp)
|
||||
}
|
||||
|
||||
return outGroups, nil
|
||||
}
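A test-style sketch of expandGroup usage; it assumes it compiles inside the headscale package, that Groups is, like TagOwners, a map from name to member list (the Groups type definition itself is outside this hunk), and it does not assert how NormalizeToFQDNRules rewrites the member names.

func demoExpandGroup() {
	pol := ACLPolicy{
		Groups: Groups{
			"group:example": []string{"alice", "bob"},
		},
	}

	// Returns the (normalised) members of the group.
	namespaces, err := expandGroup(pol, "group:example", true)
	fmt.Println(namespaces, err)

	// Unknown groups, and groups nested inside groups, yield errInvalidGroup.
	_, err = expandGroup(pol, "group:missing", true)
	fmt.Println(errors.Is(err, errInvalidGroup))
}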
|
||||
|
acls_test.go (1275 changed lines; diff suppressed because it is too large)
@@ -5,23 +5,25 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"gopkg.in/yaml.v3"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
// ACLPolicy represents a Tailscale ACL Policy.
type ACLPolicy struct {
	Groups    Groups    `json:"Groups"`
	Hosts     Hosts     `json:"Hosts"`
	TagOwners TagOwners `json:"TagOwners"`
	ACLs      []ACL     `json:"ACLs"`
	Tests     []ACLTest `json:"Tests"`
	Groups    Groups    `json:"groups" yaml:"groups"`
	Hosts     Hosts     `json:"hosts" yaml:"hosts"`
	TagOwners TagOwners `json:"tagOwners" yaml:"tagOwners"`
	ACLs      []ACL     `json:"acls" yaml:"acls"`
	Tests     []ACLTest `json:"tests" yaml:"tests"`
}

// ACL is a basic rule for the ACL Policy.
type ACL struct {
	Action string   `json:"Action"`
	Users  []string `json:"Users"`
	Ports  []string `json:"Ports"`
	Action       string   `json:"action" yaml:"action"`
	Protocol     string   `json:"proto" yaml:"proto"`
	Sources      []string `json:"src" yaml:"src"`
	Destinations []string `json:"dst" yaml:"dst"`
}

// Groups references a series of alias in the ACL rules.

@@ -35,9 +37,9 @@ type TagOwners map[string][]string

// ACLTest is not implemented, but should be use to check if a certain rule is allowed.
type ACLTest struct {
	User  string   `json:"User"`
	Allow []string `json:"Allow"`
	Deny  []string `json:"Deny,omitempty"`
	Source string   `json:"src" yaml:"src"`
	Accept []string `json:"accept" yaml:"accept"`
	Deny   []string `json:"deny,omitempty" yaml:"deny,omitempty"`
}
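Given the lowercased json/yaml tags above (groups, hosts, tagOwners, acls, tests, and action/proto/src/dst inside a rule), a policy can now be written in YAML as well as HuJSON. A minimal sketch of such a file; every name, CIDR and port below is illustrative rather than taken from the diff:

groups:
  "group:admins":
    - admin
hosts:
  internal-net: 10.0.0.0/24
tagOwners:
  "tag:webserver":
    - "group:admins"
acls:
  - action: accept
    proto: tcp
    src:
      - "group:admins"
    dst:
      - "internal-net:443"
tests:
  - src: admin
    accept:
      - "internal-net:443"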
|
||||
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects.
|
||||
@@ -69,6 +71,27 @@ func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalYAML allows to parse the Hosts directly into netaddr objects.
|
||||
func (hosts *Hosts) UnmarshalYAML(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
|
||||
err := yaml.Unmarshal(data, &hostIPPrefixMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for host, prefixStr := range hostIPPrefixMap {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newHosts[host] = prefix
|
||||
}
|
||||
*hosts = newHosts
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsZero is perhaps a bit naive here.
|
||||
func (policy ACLPolicy) IsZero() bool {
|
||||
if len(policy.Groups) == 0 && len(policy.Hosts) == 0 && len(policy.ACLs) == 0 {
|
||||
|
api.go (606 changed lines)
@@ -1,16 +1,18 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gorilla/mux"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
@@ -20,7 +22,7 @@ import (
|
||||
|
||||
const (
|
||||
reservedResponseHeaderSize = 4
|
||||
RegisterMethodAuthKey = "authKey"
|
||||
RegisterMethodAuthKey = "authkey"
|
||||
RegisterMethodOIDC = "oidc"
|
||||
RegisterMethodCLI = "cli"
|
||||
ErrRegisterMethodCLIDoesNotSupportExpire = Error(
|
||||
@@ -28,51 +30,153 @@ const (
|
||||
)
|
||||
)
|
||||
|
||||
// KeyHandler provides the Headscale pub key
|
||||
// Listens in /key.
|
||||
func (h *Headscale) KeyHandler(ctx *gin.Context) {
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"text/plain; charset=utf-8",
|
||||
[]byte(MachinePublicKeyStripPrefix(h.privateKey.Public())),
|
||||
)
|
||||
}
|
||||
func (h *Headscale) HealthHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
respond := func(err error) {
|
||||
writer.Header().Set("Content-Type", "application/health+json; charset=utf-8")
|
||||
|
||||
// RegisterWebAPI shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) RegisterWebAPI(ctx *gin.Context) {
|
||||
machineKeyStr := ctx.Query("key")
|
||||
if machineKeyStr == "" {
|
||||
ctx.String(http.StatusBadRequest, "Wrong params")
|
||||
res := struct {
|
||||
Status string `json:"status"`
|
||||
}{
|
||||
Status: "pass",
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
log.Error().Caller().Err(err).Msg("health check failed")
|
||||
res.Status = "fail"
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msg("marshal failed")
|
||||
}
|
||||
_, err = writer.Write(buf)
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msg("write failed")
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.pingDB(); err != nil {
|
||||
respond(err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusOK, "text/html; charset=utf-8", []byte(fmt.Sprintf(`
|
||||
<html>
|
||||
respond(nil)
|
||||
}
|
||||
|
||||
// KeyHandler provides the Headscale pub key
// Listens in /key.
func (h *Headscale) KeyHandler(
	writer http.ResponseWriter,
	req *http.Request,
) {
	writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
	writer.WriteHeader(http.StatusOK)
	_, err := writer.Write([]byte(MachinePublicKeyStripPrefix(h.privateKey.Public())))
	if err != nil {
		log.Error().
			Caller().
			Err(err).
			Msg("Failed to write response")
	}
}
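Since KeyHandler is documented as listening on /key and returns the stripped machine public key as plain text, any HTTP client can fetch it. A minimal client-side sketch; the server URL is a placeholder, not something defined in this diff:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder address; point this at your own headscale server.
	resp, err := http.Get("https://headscale.example.com/key")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	pub, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("control server public key: %s\n", pub)
}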
|
||||
|
||||
type registerWebAPITemplateConfig struct {
|
||||
Key string
|
||||
}
|
||||
|
||||
var registerWebAPITemplate = template.Must(
|
||||
template.New("registerweb").Parse(`
|
||||
<html>
|
||||
<head>
|
||||
<title>Registration - Headscale</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>headscale</h1>
|
||||
<p>
|
||||
Run the command below in the headscale server to add this machine to your network:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>
|
||||
<b>headscale -n NAMESPACE nodes register --key %s</b>
|
||||
</code>
|
||||
</p>
|
||||
|
||||
<h1>headscale</h1>
|
||||
<h2>Machine registration</h2>
|
||||
<p>
|
||||
Run the command below in the headscale server to add this machine to your network:
|
||||
</p>
|
||||
<pre><code>headscale -n NAMESPACE nodes register --key {{.Key}}</code></pre>
|
||||
</body>
|
||||
</html>
|
||||
</html>
|
||||
`))
|
||||
|
||||
`, machineKeyStr)))
|
||||
// RegisterWebAPI shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) RegisterWebAPI(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
machineKeyStr := req.URL.Query().Get("key")
|
||||
if machineKeyStr == "" {
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusBadRequest)
|
||||
_, err := writer.Write([]byte("Wrong params"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var content bytes.Buffer
|
||||
if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
|
||||
Key: machineKeyStr,
|
||||
}); err != nil {
|
||||
log.Error().
|
||||
Str("func", "RegisterWebAPI").
|
||||
Err(err).
|
||||
Msg("Could not render register web API template")
|
||||
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err = writer.Write([]byte("Could not render register web API template"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
writer.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err := writer.Write(content.Bytes())
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
// RegistrationHandler handles the actual registration process of a machine
|
||||
// Endpoint /machine/:id.
|
||||
func (h *Headscale) RegistrationHandler(ctx *gin.Context) {
|
||||
body, _ := io.ReadAll(ctx.Request.Body)
|
||||
machineKeyStr := ctx.Param("id")
|
||||
// Endpoint /machine/:mkey.
|
||||
func (h *Headscale) RegistrationHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
vars := mux.Vars(req)
|
||||
machineKeyStr, ok := vars["mkey"]
|
||||
if !ok || machineKeyStr == "" {
|
||||
log.Error().
|
||||
Str("handler", "RegistrationHandler").
|
||||
Msg("No machine ID in request")
|
||||
http.Error(writer, "No machine ID in request", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
body, _ := io.ReadAll(req.Body)
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText([]byte(MachinePublicKeyEnsurePrefix(machineKeyStr)))
|
||||
@@ -82,19 +186,19 @@ func (h *Headscale) RegistrationHandler(ctx *gin.Context) {
|
||||
Err(err).
|
||||
Msg("Cannot parse machine key")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Sad!")
|
||||
http.Error(writer, "Cannot parse machine key", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
req := tailcfg.RegisterRequest{}
|
||||
err = decode(body, &req, &machineKey, h.privateKey)
|
||||
registerRequest := tailcfg.RegisterRequest{}
|
||||
err = decode(body, ®isterRequest, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot decode message")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Very sad!")
|
||||
http.Error(writer, "Cannot decode message", http.StatusBadRequest)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -102,36 +206,73 @@ func (h *Headscale) RegistrationHandler(ctx *gin.Context) {
|
||||
now := time.Now().UTC()
|
||||
machine, err := h.GetMachineByMachineKey(machineKey)
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Info().Str("machine", req.Hostinfo.Hostname).Msg("New machine")
|
||||
newMachine := Machine{
|
||||
Expiry: &time.Time{},
|
||||
MachineKey: MachinePublicKeyStripPrefix(machineKey),
|
||||
Name: req.Hostinfo.Hostname,
|
||||
}
|
||||
if err := h.db.Create(&newMachine).Error; err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Could not create row")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
log.Info().Str("machine", registerRequest.Hostinfo.Hostname).Msg("New machine")
|
||||
|
||||
machineKeyStr := MachinePublicKeyStripPrefix(machineKey)
|
||||
|
||||
// If the machine has AuthKey set, handle registration via PreAuthKeys
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
machine = &newMachine
|
||||
|
||||
givenName, err := h.GenerateGivenName(registerRequest.Hostinfo.Hostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
|
||||
Err(err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine did not have a key to authenticate, which means
|
||||
// that we rely on a method that calls back some how (OpenID or CLI)
|
||||
// We create the machine and then keep it around until a callback
|
||||
// happens
|
||||
newMachine := Machine{
|
||||
MachineKey: machineKeyStr,
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
GivenName: givenName,
|
||||
NodeKey: NodePublicKeyStripPrefix(registerRequest.NodeKey),
|
||||
LastSeen: &now,
|
||||
Expiry: &time.Time{},
|
||||
}
|
||||
|
||||
if !registerRequest.Expiry.IsZero() {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg("Non-zero expiry time requested")
|
||||
newMachine.Expiry = ®isterRequest.Expiry
|
||||
}
|
||||
|
||||
h.registrationCache.Set(
|
||||
machineKeyStr,
|
||||
newMachine,
|
||||
registerCacheExpiration,
|
||||
)
|
||||
|
||||
h.handleMachineRegistrationNew(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if machine.Registered {
|
||||
// The machine is already registered, so we need to pass through reauth or key update.
|
||||
if machine != nil {
|
||||
// If the NodeKey stored in headscale is the same as the key presented in a registration
|
||||
// request, then we have a node that is either:
|
||||
// - Trying to log out (sending a expiry in the past)
|
||||
// - A valid, registered machine, looking for the node map
|
||||
// - Expired machine wanting to reauthenticate
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(req.NodeKey) {
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(registerRequest.NodeKey) {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !req.Expiry.IsZero() && req.Expiry.UTC().Before(now) {
|
||||
h.handleMachineLogOut(ctx, machineKey, *machine)
|
||||
if !registerRequest.Expiry.IsZero() && registerRequest.Expiry.UTC().Before(now) {
|
||||
h.handleMachineLogOut(writer, req, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -139,44 +280,35 @@ func (h *Headscale) RegistrationHandler(ctx *gin.Context) {
|
||||
// If the machine is not expired and is registered, we have already accepted this machine,
|
||||
// let it proceed with a valid registration
|
||||
if !machine.isExpired() {
|
||||
h.handleMachineValidRegistration(ctx, machineKey, *machine)
|
||||
h.handleMachineValidRegistration(writer, req, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(req.OldNodeKey) &&
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(registerRequest.OldNodeKey) &&
|
||||
!machine.isExpired() {
|
||||
h.handleMachineRefreshKey(ctx, machineKey, req, *machine)
|
||||
h.handleMachineRefreshKey(writer, req, machineKey, registerRequest, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The machine has expired
|
||||
h.handleMachineExpired(ctx, machineKey, req, *machine)
|
||||
h.handleMachineExpired(writer, req, machineKey, registerRequest, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// If the machine has AuthKey set, handle registration via PreAuthKeys
|
||||
if req.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(ctx, machineKey, req, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
h.handleMachineRegistrationNew(ctx, machineKey, req, *machine)
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapResponse(
|
||||
machineKey key.MachinePublic,
|
||||
req tailcfg.MapRequest,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
) ([]byte, error) {
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
@@ -229,17 +361,30 @@ func (h *Headscale) getMapResponse(
|
||||
PacketFilter: h.aclRules,
|
||||
DERPMap: h.DERPMap,
|
||||
UserProfiles: profiles,
|
||||
Debug: &tailcfg.Debug{
|
||||
DisableLogTail: !h.cfg.LogTail.Enabled,
|
||||
RandomizeClientPort: h.cfg.RandomizeClientPort,
|
||||
},
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Str("machine", mapRequest.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
var respBody []byte
|
||||
if req.Compress == "zstd" {
|
||||
src, _ := json.Marshal(resp)
|
||||
if mapRequest.Compress == "zstd" {
|
||||
src, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to marshal response for the client")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
@@ -268,7 +413,16 @@ func (h *Headscale) getMapKeepAliveResponse(
|
||||
var respBody []byte
|
||||
var err error
|
||||
if mapRequest.Compress == "zstd" {
|
||||
src, _ := json.Marshal(mapResponse)
|
||||
src, err := json.Marshal(mapResponse)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapKeepAliveResponse").
|
||||
Err(err).
|
||||
Msg("Failed to marshal keepalive response for the client")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
@@ -286,17 +440,28 @@ func (h *Headscale) getMapKeepAliveResponse(
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineLogOut(
|
||||
ctx *gin.Context,
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Client requested logout")
|
||||
|
||||
h.ExpireMachine(&machine)
|
||||
err := h.ExpireMachine(&machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleMachineLogOut").
|
||||
Err(err).
|
||||
Msg("Failed to expire machine")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = false
|
||||
@@ -307,15 +472,25 @@ func (h *Headscale) handleMachineLogOut(
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineValidRegistration(
|
||||
ctx *gin.Context,
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
@@ -323,7 +498,7 @@ func (h *Headscale) handleMachineValidRegistration(
|
||||
|
||||
// The machine registration is valid, respond with redirect to /map
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
@@ -339,17 +514,27 @@ func (h *Headscale) handleMachineValidRegistration(
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("update", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("update", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineExpired(
|
||||
ctx *gin.Context,
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
@@ -358,11 +543,11 @@ func (h *Headscale) handleMachineExpired(
|
||||
|
||||
// The client has registered before, but has expired
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("Machine registration has expired. Sending a authurl to register")
|
||||
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(ctx, machineKey, registerRequest, machine)
|
||||
h.handleAuthKey(writer, req, machineKey, registerRequest)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -383,17 +568,27 @@ func (h *Headscale) handleMachineExpired(
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRefreshKey(
|
||||
ctx *gin.Context,
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
@@ -401,10 +596,19 @@ func (h *Headscale) handleMachineRefreshKey(
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
h.db.Save(&machine)
|
||||
|
||||
if err := h.db.Save(&machine).Error; err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to update machine key in the database")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
@@ -414,24 +618,33 @@ func (h *Headscale) handleMachineRefreshKey(
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRegistrationNew(
|
||||
ctx *gin.Context,
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is new, redirect the client to the registration URL
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("The node is sending us a new NodeKey, sending auth url")
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf(
|
||||
@@ -444,54 +657,49 @@ func (h *Headscale) handleMachineRegistrationNew(
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), MachinePublicKeyStripPrefix(machineKey))
|
||||
}
|
||||
|
||||
if !registerRequest.Expiry.IsZero() {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", machine.Name).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg("Non-zero expiry time requested, adding to cache")
|
||||
h.requestedExpiryCache.Set(
|
||||
machineKey.String(),
|
||||
registerRequest.Expiry,
|
||||
requestedExpiryCacheExpiration,
|
||||
)
|
||||
}
|
||||
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
|
||||
// save the NodeKey
|
||||
h.db.Save(&machine)
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: check if any locks are needed around IP allocation.
|
||||
func (h *Headscale) handleAuthKey(
|
||||
ctx *gin.Context,
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
machineKeyStr := MachinePublicKeyStripPrefix(machineKey)
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
pak, err := h.checkKeyValidity(registerRequest.Auth.AuthKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
resp.MachineAuthorized = false
|
||||
@@ -500,71 +708,126 @@ func (h *Headscale) handleAuthKey(
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusUnauthorized, "application/json; charset=utf-8", respBody)
|
||||
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusUnauthorized)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
if pak != nil {
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
} else {
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", "unknown").Inc()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if machine.isRegistered() {
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msg("Authentication key was valid, proceeding to acquire IP addresses")
|
||||
|
||||
nodeKey := NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
|
||||
// retrieve machine information if it exist
|
||||
// The error is not important, because if it does not
|
||||
// exist, then this is a new machine and we will move
|
||||
// on to registration.
|
||||
machine, _ := h.GetMachineByMachineKey(machineKey)
|
||||
if machine != nil {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", machine.Name).
|
||||
Msg("machine already registered, reauthenticating")
|
||||
Str("machine", machine.Hostname).
|
||||
Msg("machine already registered, refreshing with new auth key")
|
||||
|
||||
h.RefreshMachine(&machine, registerRequest.Expiry)
|
||||
} else {
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Msg("Authentication key was valid, proceeding to acquire an IP address")
|
||||
ip, err := h.getAvailableIP()
|
||||
machine.NodeKey = nodeKey
|
||||
machine.AuthKeyID = uint(pak.ID)
|
||||
err := h.RefreshMachine(machine, registerRequest.Expiry)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Msg("Failed to find an available IP")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
Str("machine", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to refresh machine")
|
||||
|
||||
return
|
||||
}
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("ip", ip.String()).
|
||||
Msgf("Assigning %s to %s", ip, machine.Name)
|
||||
} else {
|
||||
now := time.Now().UTC()
|
||||
|
||||
machine.Expiry = ®isterRequest.Expiry
|
||||
machine.AuthKeyID = uint(pak.ID)
|
||||
machine.IPAddress = ip.String()
|
||||
machine.NamespaceID = pak.NamespaceID
|
||||
givenName, err := h.GenerateGivenName(registerRequest.Hostinfo.Hostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "RegistrationHandler").
|
||||
Str("hostinfo.name", registerRequest.Hostinfo.Hostname).
|
||||
Err(err)
|
||||
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
// we update it just in case
|
||||
machine.Registered = true
|
||||
machine.RegisterMethod = RegisterMethodAuthKey
|
||||
h.db.Save(&machine)
|
||||
return
|
||||
}
|
||||
|
||||
machineToRegister := Machine{
|
||||
Hostname: registerRequest.Hostinfo.Hostname,
|
||||
GivenName: givenName,
|
||||
NamespaceID: pak.Namespace.ID,
|
||||
MachineKey: machineKeyStr,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
Expiry: ®isterRequest.Expiry,
|
||||
NodeKey: nodeKey,
|
||||
LastSeen: &now,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
|
||||
machine, err = h.RegisterMachine(
|
||||
machineToRegister,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("could not register machine")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
pak.Used = true
|
||||
h.db.Save(&pak)
|
||||
err = h.UsePreAuthKey(pak)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to use pre-auth key")
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *pak.Namespace.toUser()
|
||||
@@ -573,21 +836,30 @@ func (h *Headscale) handleAuthKey(
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "error", pak.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
http.Error(writer, "Internal server error", http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "success", machine.Namespace.Name).
|
||||
machineRegistrations.WithLabelValues("new", RegisterMethodAuthKey, "success", pak.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
_, err = writer.Write(respBody)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("ip", machine.IPAddress).
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Str("ips", strings.Join(machine.IPAddresses.ToStringSlice(), ", ")).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
}
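From the client side this is the path taken when a node is brought up non-interactively with a pre-auth key, typically something like tailscale up --login-server <your-headscale-url> --authkey <key> (those flag names belong to the upstream Tailscale client, not to this diff): the key arrives in registerRequest.Auth.AuthKey, is validated with checkKeyValidity, and the machine is either refreshed or registered without any browser round trip.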
|
||||
|
api_key.go (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
apiPrefixLength = 7
|
||||
apiKeyLength = 32
|
||||
|
||||
errAPIKeyFailedToParse = Error("Failed to parse ApiKey")
|
||||
)
|
||||
|
||||
// APIKey describes the datamodel for API keys used to remotely authenticate with
|
||||
// headscale.
|
||||
type APIKey struct {
|
||||
ID uint64 `gorm:"primary_key"`
|
||||
Prefix string `gorm:"uniqueIndex"`
|
||||
Hash []byte
|
||||
|
||||
CreatedAt *time.Time
|
||||
Expiration *time.Time
|
||||
LastSeen *time.Time
|
||||
}
|
||||
|
||||
// CreateAPIKey creates a new ApiKey in a namespace, and returns it.
|
||||
func (h *Headscale) CreateAPIKey(
|
||||
expiration *time.Time,
|
||||
) (string, *APIKey, error) {
|
||||
prefix, err := GenerateRandomStringURLSafe(apiPrefixLength)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
toBeHashed, err := GenerateRandomStringURLSafe(apiKeyLength)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
// Key to return to user, this will only be visible _once_
|
||||
keyStr := prefix + "." + toBeHashed
|
||||
|
||||
hash, err := bcrypt.GenerateFromPassword([]byte(toBeHashed), bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
key := APIKey{
|
||||
Prefix: prefix,
|
||||
Hash: hash,
|
||||
Expiration: expiration,
|
||||
}
|
||||
|
||||
if err := h.db.Save(&key).Error; err != nil {
|
||||
return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
|
||||
}
|
||||
|
||||
return keyStr, &key, nil
|
||||
}
|
||||
|
||||
// ListAPIKeys returns the list of ApiKeys for a namespace.
|
||||
func (h *Headscale) ListAPIKeys() ([]APIKey, error) {
|
||||
keys := []APIKey{}
|
||||
if err := h.db.Find(&keys).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
// GetAPIKey returns a ApiKey for a given key.
|
||||
func (h *Headscale) GetAPIKey(prefix string) (*APIKey, error) {
|
||||
key := APIKey{}
|
||||
if result := h.db.First(&key, "prefix = ?", prefix); result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return &key, nil
|
||||
}
|
||||
|
||||
// GetAPIKeyByID returns a ApiKey for a given id.
|
||||
func (h *Headscale) GetAPIKeyByID(id uint64) (*APIKey, error) {
|
||||
key := APIKey{}
|
||||
if result := h.db.Find(&APIKey{ID: id}).First(&key); result.Error != nil {
|
||||
return nil, result.Error
|
||||
}
|
||||
|
||||
return &key, nil
|
||||
}
|
||||
|
||||
// DestroyAPIKey destroys a ApiKey. Returns error if the ApiKey
|
||||
// does not exist.
|
||||
func (h *Headscale) DestroyAPIKey(key APIKey) error {
|
||||
if result := h.db.Unscoped().Delete(key); result.Error != nil {
|
||||
return result.Error
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ExpireAPIKey marks a ApiKey as expired.
|
||||
func (h *Headscale) ExpireAPIKey(key *APIKey) error {
|
||||
if err := h.db.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) ValidateAPIKey(keyStr string) (bool, error) {
	prefix, hash, found := strings.Cut(keyStr, ".")
	if !found {
		return false, errAPIKeyFailedToParse
	}

	key, err := h.GetAPIKey(prefix)
	if err != nil {
		return false, fmt.Errorf("failed to validate api key: %w", err)
	}

	if key.Expiration.Before(time.Now()) {
		return false, nil
	}

	if err := bcrypt.CompareHashAndPassword(key.Hash, []byte(hash)); err != nil {
		return false, err
	}

	return true, nil
}
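The key format implied by CreateAPIKey and ValidateAPIKey is "<prefix>.<secret>": only a bcrypt hash of the secret half is stored, and the prefix is the lookup key. A small self-contained sketch of that scheme; the literal values are illustrative, since headscale generates both halves with GenerateRandomStringURLSafe:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Illustrative values only; real keys are random URL-safe strings.
	prefix, secret := "abcd123", "ThisIsOnlyAnExampleSecretValue01"
	full := prefix + "." + secret // shown to the user exactly once

	// What gets stored: the prefix (for lookup) and a bcrypt hash of the secret.
	hash, err := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}

	// Validation: split on the first ".", look the record up by prefix,
	// then compare the stored bcrypt hash against the presented secret.
	gotPrefix, gotSecret, ok := strings.Cut(full, ".")
	fmt.Println(ok, gotPrefix == prefix)
	fmt.Println(bcrypt.CompareHashAndPassword(hash, []byte(gotSecret)) == nil)
}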
|
||||
|
||||
func (key *APIKey) toProto() *v1.ApiKey {
|
||||
protoKey := v1.ApiKey{
|
||||
Id: key.ID,
|
||||
Prefix: key.Prefix,
|
||||
}
|
||||
|
||||
if key.Expiration != nil {
|
||||
protoKey.Expiration = timestamppb.New(*key.Expiration)
|
||||
}
|
||||
|
||||
if key.CreatedAt != nil {
|
||||
protoKey.CreatedAt = timestamppb.New(*key.CreatedAt)
|
||||
}
|
||||
|
||||
if key.LastSeen != nil {
|
||||
protoKey.LastSeen = timestamppb.New(*key.LastSeen)
|
||||
}
|
||||
|
||||
return &protoKey
|
||||
}
|
api_key_test.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func (*Suite) TestCreateAPIKey(c *check.C) {
|
||||
apiKeyStr, apiKey, err := app.CreateAPIKey(nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(apiKey, check.NotNil)
|
||||
|
||||
// Did we get a valid key?
|
||||
c.Assert(apiKey.Prefix, check.NotNil)
|
||||
c.Assert(apiKey.Hash, check.NotNil)
|
||||
c.Assert(apiKeyStr, check.Not(check.Equals), "")
|
||||
|
||||
_, err = app.ListAPIKeys()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
keys, err := app.ListAPIKeys()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(len(keys), check.Equals, 1)
|
||||
}
|
||||
|
||||
func (*Suite) TestAPIKeyDoesNotExist(c *check.C) {
|
||||
key, err := app.GetAPIKey("does-not-exist")
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(key, check.IsNil)
|
||||
}
|
||||
|
||||
func (*Suite) TestValidateAPIKeyOk(c *check.C) {
|
||||
nowPlus2 := time.Now().Add(2 * time.Hour)
|
||||
apiKeyStr, apiKey, err := app.CreateAPIKey(&nowPlus2)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(apiKey, check.NotNil)
|
||||
|
||||
valid, err := app.ValidateAPIKey(apiKeyStr)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(valid, check.Equals, true)
|
||||
}
|
||||
|
||||
func (*Suite) TestValidateAPIKeyNotOk(c *check.C) {
|
||||
nowMinus2 := time.Now().Add(time.Duration(-2) * time.Hour)
|
||||
apiKeyStr, apiKey, err := app.CreateAPIKey(&nowMinus2)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(apiKey, check.NotNil)
|
||||
|
||||
valid, err := app.ValidateAPIKey(apiKeyStr)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(valid, check.Equals, false)
|
||||
|
||||
now := time.Now()
|
||||
apiKeyStrNow, apiKey, err := app.CreateAPIKey(&now)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(apiKey, check.NotNil)
|
||||
|
||||
validNow, err := app.ValidateAPIKey(apiKeyStrNow)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(validNow, check.Equals, false)
|
||||
|
||||
validSilly, err := app.ValidateAPIKey("nota.validkey")
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(validSilly, check.Equals, false)
|
||||
|
||||
validWithErr, err := app.ValidateAPIKey("produceerrorkey")
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(validWithErr, check.Equals, false)
|
||||
}
|
||||
|
||||
func (*Suite) TestExpireAPIKey(c *check.C) {
|
||||
nowPlus2 := time.Now().Add(2 * time.Hour)
|
||||
apiKeyStr, apiKey, err := app.CreateAPIKey(&nowPlus2)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(apiKey, check.NotNil)
|
||||
|
||||
valid, err := app.ValidateAPIKey(apiKeyStr)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(valid, check.Equals, true)
|
||||
|
||||
err = app.ExpireAPIKey(apiKey)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(apiKey.Expiration, check.NotNil)
|
||||
|
||||
notValid, err := app.ValidateAPIKey(apiKeyStr)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(notValid, check.Equals, false)
|
||||
}
|
app_test.go (28 changed lines)
@@ -5,7 +5,6 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
@@ -41,17 +40,15 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
c.Fatal(err)
|
||||
}
|
||||
cfg := Config{
|
||||
IPPrefix: netaddr.MustParseIPPrefix("10.27.0.0/23"),
|
||||
IPPrefixes: []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("10.27.0.0/23"),
|
||||
},
|
||||
}
|
||||
|
||||
app = Headscale{
|
||||
cfg: cfg,
|
||||
cfg: &cfg,
|
||||
dbType: "sqlite3",
|
||||
dbString: tmpDir + "/headscale_test.db",
|
||||
requestedExpiryCache: cache.New(
|
||||
requestedExpiryCacheExpiration,
|
||||
requestedExpiryCacheCleanupInterval,
|
||||
),
|
||||
}
|
||||
err = app.initDB()
|
||||
if err != nil {
|
||||
@@ -63,3 +60,20 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
}
|
||||
app.db = db
|
||||
}
|
||||
|
||||
// Ensure an error is returned when an invalid auth mode
|
||||
// is supplied.
|
||||
func (s *Suite) TestInvalidClientAuthMode(c *check.C) {
|
||||
_, isValid := LookupTLSClientAuthMode("invalid")
|
||||
c.Assert(isValid, check.Equals, false)
|
||||
}
|
||||
|
||||
// Ensure that all client auth modes return a nil error.
|
||||
func (s *Suite) TestAuthModes(c *check.C) {
|
||||
modes := []string{"disabled", "relaxed", "enforced"}
|
||||
|
||||
for _, v := range modes {
|
||||
_, isValid := LookupTLSClientAuthMode(v)
|
||||
c.Assert(isValid, check.Equals, true)
|
||||
}
|
||||
}
|
||||
|
@@ -1,266 +0,0 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"text/template"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// AppleMobileConfig shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) AppleMobileConfig(ctx *gin.Context) {
|
||||
appleTemplate := template.Must(template.New("apple").Parse(`
|
||||
<html>
|
||||
<body>
|
||||
<h1>Apple configuration profiles</h1>
|
||||
<p>
|
||||
This page provides <a href="https://support.apple.com/guide/mdm/mdm-overview-mdmbf9e668/web">configuration profiles</a> for the official Tailscale clients for <a href="https://apps.apple.com/us/app/tailscale/id1470499037?ls=1">iOS</a> and <a href="https://apps.apple.com/ca/app/tailscale/id1475387142?mt=12">macOS</a>.
|
||||
</p>
|
||||
<p>
|
||||
The profiles will configure Tailscale.app to use {{.Url}} as its control server.
|
||||
</p>
|
||||
|
||||
<h3>Caution</h3>
|
||||
<p>You should always inspect the profile before installing it:</p>
|
||||
<!--
|
||||
<p><code>curl {{.Url}}/apple/ios</code></p>
|
||||
-->
|
||||
<p><code>curl {{.Url}}/apple/macos</code></p>
|
||||
|
||||
<h2>Profiles</h2>
|
||||
|
||||
<!--
|
||||
<h3>iOS</h3>
|
||||
<p>
|
||||
<a href="/apple/ios" download="headscale_ios.mobileconfig">iOS profile</a>
|
||||
</p>
|
||||
-->
|
||||
|
||||
<h3>macOS</h3>
|
||||
<p>Headscale can be set to the default server by installing a Headscale configuration profile:</p>
|
||||
<p>
|
||||
<a href="/apple/macos" download="headscale_macos.mobileconfig">macOS profile</a>
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>Download the profile, then open it. When it has been opened, there should be a notification that a profile can be installed</li>
|
||||
<li>Open System Preferences and go to "Profiles"</li>
|
||||
<li>Find and install the Headscale profile</li>
|
||||
<li>Restart Tailscale.app and log in</li>
|
||||
</ol>
|
||||
|
||||
<p>Or</p>
|
||||
<p>Use your terminal to configure the default setting for Tailscale by issuing:</p>
|
||||
<code>defaults write io.tailscale.ipn.macos ControlURL {{.URL}}</code>
|
||||
|
||||
<p>Restart Tailscale.app and log in.</p>
|
||||
|
||||
</body>
|
||||
</html>`))
|
||||
|
||||
config := map[string]interface{}{
|
||||
"URL": h.cfg.ServerURL,
|
||||
}
|
||||
|
||||
var payload bytes.Buffer
|
||||
if err := appleTemplate.Execute(&payload, config); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "AppleMobileConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple index template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple index template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusOK, "text/html; charset=utf-8", payload.Bytes())
|
||||
}
|
||||
|
||||
func (h *Headscale) ApplePlatformConfig(ctx *gin.Context) {
|
||||
platform := ctx.Param("platform")
|
||||
|
||||
id, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Failed not create UUID")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Failed to create UUID"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
contentID, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Failed not create UUID")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Failed to create UUID"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
platformConfig := AppleMobilePlatformConfig{
|
||||
UUID: contentID,
|
||||
URL: h.cfg.ServerURL,
|
||||
}
|
||||
|
||||
var payload bytes.Buffer
|
||||
|
||||
switch platform {
|
||||
case "macos":
|
||||
if err := macosTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple macOS template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple macOS template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
case "ios":
|
||||
if err := iosTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple iOS template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple iOS template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
default:
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Invalid platform, only ios and macos is supported"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
config := AppleMobileConfig{
|
||||
UUID: id,
|
||||
URL: h.cfg.ServerURL,
|
||||
Payload: payload.String(),
|
||||
}
|
||||
|
||||
var content bytes.Buffer
|
||||
if err := commonTemplate.Execute(&content, config); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple platform template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple platform template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"application/x-apple-aspen-config; charset=utf-8",
|
||||
content.Bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
type AppleMobileConfig struct {
|
||||
UUID uuid.UUID
|
||||
URL string
|
||||
Payload string
|
||||
}
|
||||
|
||||
type AppleMobilePlatformConfig struct {
|
||||
UUID uuid.UUID
|
||||
URL string
|
||||
}
|
||||
|
||||
var commonTemplate = template.Must(
|
||||
template.New("mobileconfig").Parse(`<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadDisplayName</key>
|
||||
<string>Headscale</string>
|
||||
<key>PayloadDescription</key>
|
||||
<string>Configure Tailscale login server to: {{.URL}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadRemovalDisallowed</key>
|
||||
<false/>
|
||||
<key>PayloadType</key>
|
||||
<string>Configuration</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadContent</key>
|
||||
<array>
|
||||
{{.Payload}}
|
||||
</array>
|
||||
</dict>
|
||||
</plist>`),
|
||||
)
|
||||
|
||||
var iosTemplate = template.Must(template.New("iosTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.ios</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadEnabled</key>
|
||||
<true/>
|
||||
|
||||
<key>ControlURL</key>
|
||||
<string>{{.URL}}</string>
|
||||
</dict>
|
||||
`))
|
||||
|
||||
var macosTemplate = template.Must(template.New("macosTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.macos</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadEnabled</key>
|
||||
<true/>
|
||||
|
||||
<key>ControlURL</key>
|
||||
<string>{{.URL}}</string>
|
||||
</dict>
|
||||
`))
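For context, a minimal client-side sketch of fetching the profile that ApplePlatformConfig renders. The server address and the /apple/macos route are assumptions for illustration only; the route registration is not part of this hunk.

package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// Hypothetical URL: adjust host and path to match where the handler above
	// is actually mounted in your deployment.
	resp, err := http.Get("https://headscale.example.com/apple/macos")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	out, err := os.Create("headscale.mobileconfig")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer out.Close()

	// The handler replies with application/x-apple-aspen-config; saving the
	// body yields a profile that can be installed on the Apple device.
	if _, err := io.Copy(out, resp.Body); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}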
|
39
cli_test.go
@@ -1,39 +0,0 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
|
||||
func (s *Suite) TestRegisterMachine(c *check.C) {
|
||||
namespace, err := app.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: namespace.ID,
|
||||
IPAddress: "10.0.0.1",
|
||||
Expiry: &now,
|
||||
}
|
||||
app.db.Save(&machine)
|
||||
|
||||
_, err = app.GetMachine("test", "testmachine")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineAfterRegistering, err := app.RegisterMachine(
|
||||
"8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e",
|
||||
namespace.Name,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(machineAfterRegistering.Registered, check.Equals, true)
|
||||
|
||||
_, err = machineAfterRegistering.GetHostInfo()
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
199
cmd/headscale/cli/api_key.go
Normal file
@@ -0,0 +1,199 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// 90 days.
|
||||
DefaultAPIKeyExpiry = "90d"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(apiKeysCmd)
|
||||
apiKeysCmd.AddCommand(listAPIKeys)
|
||||
|
||||
createAPIKeyCmd.Flags().
|
||||
StringP("expiration", "e", DefaultAPIKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
|
||||
|
||||
apiKeysCmd.AddCommand(createAPIKeyCmd)
|
||||
|
||||
expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
|
||||
err := expireAPIKeyCmd.MarkFlagRequired("prefix")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
apiKeysCmd.AddCommand(expireAPIKeyCmd)
|
||||
}
|
||||
|
||||
var apiKeysCmd = &cobra.Command{
|
||||
Use: "apikeys",
|
||||
Short: "Handle the Api keys in Headscale",
|
||||
Aliases: []string{"apikey", "api"},
|
||||
}
|
||||
|
||||
var listAPIKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the Api keys for headscale",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListApiKeysRequest{}
|
||||
|
||||
response, err := client.ListApiKeys(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.ApiKeys, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Prefix", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.ApiKeys {
|
||||
expiration := "-"
|
||||
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.Expiration.AsTime())
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), headscale.Base10),
|
||||
key.GetPrefix(),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var createAPIKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a new Api key",
|
||||
Long: `
|
||||
Creates a new Api key. The Api key is only visible on creation
|
||||
and cannot be retrieved again.
|
||||
If you lose a key, create a new one and revoke (expire) the old one.`,
|
||||
Aliases: []string{"c", "new"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
log.Trace().
|
||||
Msg("Preparing to create ApiKey")
|
||||
|
||||
request := &v1.CreateApiKeyRequest{}
|
||||
|
||||
durationStr, _ := cmd.Flags().GetString("expiration")
|
||||
|
||||
duration, err := model.ParseDuration(durationStr)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Could not parse duration: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
expiration := time.Now().UTC().Add(time.Duration(duration))
|
||||
|
||||
log.Trace().Dur("expiration", time.Duration(duration)).Msg("expiration has been set")
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.CreateApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.ApiKey, response.ApiKey, output)
|
||||
},
|
||||
}
|
||||
|
||||
var expireAPIKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire an ApiKey",
|
||||
Aliases: []string{"revoke", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
prefix, err := cmd.Flags().GetString("prefix")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
}
|
||||
|
||||
response, err := client.ExpireApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
}
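A minimal standalone sketch of the expiration handling used by apikeys create above: parse the human-readable duration and convert it into the protobuf timestamp the request carries. The literal "90d" mirrors DefaultAPIKeyExpiry; everything else is illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// model.ParseDuration accepts the extended units (d, w, y) that the
	// standard library's time.ParseDuration does not.
	duration, err := model.ParseDuration("90d")
	if err != nil {
		panic(err)
	}

	expiration := time.Now().UTC().Add(time.Duration(duration))

	// The CLI sends this value as a protobuf timestamp in CreateApiKeyRequest.
	fmt.Println(timestamppb.New(expiration).AsTime())
}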
|
28
cmd/headscale/cli/dump_config.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(dumpConfigCmd)
|
||||
}
|
||||
|
||||
var dumpConfigCmd = &cobra.Command{
|
||||
Use: "dumpConfig",
|
||||
Short: "dump current config to /etc/headscale/config.dump.yaml, integration test only",
|
||||
Hidden: true,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := viper.WriteConfigAs("/etc/headscale/config.dump.yaml")
|
||||
if err != nil {
|
||||
//nolint
|
||||
fmt.Println("Failed to dump config")
|
||||
}
|
||||
},
|
||||
}
|
42
cmd/headscale/cli/generate.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(generateCmd)
|
||||
generateCmd.AddCommand(generatePrivateKeyCmd)
|
||||
}
|
||||
|
||||
var generateCmd = &cobra.Command{
|
||||
Use: "generate",
|
||||
Short: "Generate commands",
|
||||
Aliases: []string{"gen"},
|
||||
}
|
||||
|
||||
var generatePrivateKeyCmd = &cobra.Command{
|
||||
Use: "private-key",
|
||||
Short: "Generate a private key for the headscale server",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(map[string]string{
|
||||
"private_key": string(machineKeyStr),
|
||||
},
|
||||
string(machineKeyStr), output)
|
||||
},
|
||||
}
|
@@ -25,13 +25,15 @@ const (
|
||||
)
|
||||
|
||||
var namespaceCmd = &cobra.Command{
|
||||
Use: "namespaces",
|
||||
Short: "Manage the namespaces of Headscale",
|
||||
Use: "namespaces",
|
||||
Short: "Manage the namespaces of Headscale",
|
||||
Aliases: []string{"namespace", "ns", "user", "users"},
|
||||
}
|
||||
|
||||
var createNamespaceCmd = &cobra.Command{
|
||||
Use: "create NAME",
|
||||
Short: "Creates a new namespace",
|
||||
Use: "create NAME",
|
||||
Short: "Creates a new namespace",
|
||||
Aliases: []string{"c", "new"},
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return errMissingParameter
|
||||
@@ -72,8 +74,9 @@ var createNamespaceCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var destroyNamespaceCmd = &cobra.Command{
|
||||
Use: "destroy NAME",
|
||||
Short: "Destroys a namespace",
|
||||
Use: "destroy NAME",
|
||||
Short: "Destroys a namespace",
|
||||
Aliases: []string{"delete"},
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return errMissingParameter
|
||||
@@ -144,8 +147,9 @@ var destroyNamespaceCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var listNamespacesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the namespaces",
|
||||
Use: "list",
|
||||
Short: "List all the namespaces",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -197,8 +201,9 @@ var listNamespacesCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var renameNamespaceCmd = &cobra.Command{
|
||||
Use: "rename OLD_NAME NEW_NAME",
|
||||
Short: "Renames a namespace",
|
||||
Use: "rename OLD_NAME NEW_NAME",
|
||||
Short: "Renames a namespace",
|
||||
Aliases: []string{"mv"},
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
expectedArguments := 2
|
||||
if len(args) < expectedArguments {
|
||||
|
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
@@ -12,12 +13,14 @@ import (
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
listNodesCmd.Flags().StringP("namespace", "n", "", "Filter by namespace")
|
||||
listNodesCmd.Flags().BoolP("tags", "t", false, "Show tags")
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
registerNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
@@ -39,6 +42,13 @@ func init() {
|
||||
}
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
renameNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = renameNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(renameNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
@@ -46,34 +56,36 @@ func init() {
|
||||
}
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
shareMachineCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = shareMachineCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
shareMachineCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = shareMachineCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(shareMachineCmd)
|
||||
moveNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
unshareMachineCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = unshareMachineCmd.MarkFlagRequired("namespace")
|
||||
err = moveNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
unshareMachineCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = unshareMachineCmd.MarkFlagRequired("identifier")
|
||||
|
||||
moveNodeCmd.Flags().StringP("namespace", "n", "", "New namespace")
|
||||
|
||||
err = moveNodeCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(unshareMachineCmd)
|
||||
nodeCmd.AddCommand(moveNodeCmd)
|
||||
|
||||
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
|
||||
err = tagCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
tagCmd.Flags().
|
||||
StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
|
||||
nodeCmd.AddCommand(tagCmd)
|
||||
}
|
||||
|
||||
var nodeCmd = &cobra.Command{
|
||||
Use: "nodes",
|
||||
Short: "Manage the nodes of Headscale",
|
||||
Use: "nodes",
|
||||
Short: "Manage the nodes of Headscale",
|
||||
Aliases: []string{"node", "machine", "machines"},
|
||||
}
|
||||
|
||||
var registerNodeCmd = &cobra.Command{
|
||||
@@ -127,8 +139,9 @@ var registerNodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var listNodesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List nodes",
|
||||
Use: "list",
|
||||
Short: "List nodes",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
@@ -137,6 +150,12 @@ var listNodesCmd = &cobra.Command{
|
||||
|
||||
return
|
||||
}
|
||||
showTags, err := cmd.Flags().GetBool("tags")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting tags flag: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
@@ -163,7 +182,7 @@ var listNodesCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
tableData, err := nodesToPtables(namespace, response.Machines)
|
||||
tableData, err := nodesToPtables(namespace, showTags, response.Machines)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
@@ -187,7 +206,7 @@ var expireNodeCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire (log out) a machine in your network",
|
||||
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
|
||||
Aliases: []string{"logout"},
|
||||
Aliases: []string{"logout", "exp", "e"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -228,9 +247,58 @@ var expireNodeCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var renameNodeCmd = &cobra.Command{
|
||||
Use: "rename NEW_NAME",
|
||||
Short: "Renames a machine in your network",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
newName := ""
|
||||
if len(args) > 0 {
|
||||
newName = args[0]
|
||||
}
|
||||
request := &v1.RenameMachineRequest{
|
||||
MachineId: identifier,
|
||||
NewName: newName,
|
||||
}
|
||||
|
||||
response, err := client.RenameMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine renamed", output)
|
||||
},
|
||||
}
|
||||
|
||||
var deleteNodeCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete a node",
|
||||
Use: "delete",
|
||||
Short: "Delete a node",
|
||||
Aliases: []string{"del"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -316,70 +384,29 @@ var deleteNodeCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func sharingWorker(
|
||||
cmd *cobra.Command,
|
||||
) (string, *v1.Machine, *v1.Namespace, error) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespaceStr, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting ID to integer: %s", err), output)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
machineRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
machineResponse, err := client.GetMachine(ctx, machineRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
namespaceRequest := &v1.GetNamespaceRequest{
|
||||
Name: namespaceStr,
|
||||
}
|
||||
|
||||
namespaceResponse, err := client.GetNamespace(ctx, namespaceRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
return output, machineResponse.GetMachine(), namespaceResponse.GetNamespace(), nil
|
||||
}
|
||||
|
||||
var shareMachineCmd = &cobra.Command{
|
||||
Use: "share",
|
||||
Short: "Shares a node from the current namespace to the specified one",
|
||||
var moveNodeCmd = &cobra.Command{
|
||||
Use: "move",
|
||||
Short: "Move node to another namespace",
|
||||
Aliases: []string{"mv"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, machine, namespace, err := sharingWorker(cmd)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to fetch namespace or machine: %s", err),
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting namespace: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
@@ -390,82 +417,72 @@ var shareMachineCmd = &cobra.Command{
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ShareMachineRequest{
|
||||
MachineId: machine.Id,
|
||||
Namespace: namespace.Name,
|
||||
getRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
response, err := client.ShareMachine(ctx, request)
|
||||
_, err = client.GetMachine(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error sharing node: %s", status.Convert(err).Message()),
|
||||
fmt.Sprintf(
|
||||
"Error getting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Node shared", output)
|
||||
},
|
||||
}
|
||||
moveRequest := &v1.MoveMachineRequest{
|
||||
MachineId: identifier,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
var unshareMachineCmd = &cobra.Command{
|
||||
Use: "unshare",
|
||||
Short: "Unshares a node from the specified namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, machine, namespace, err := sharingWorker(cmd)
|
||||
moveResponse, err := client.MoveMachine(ctx, moveRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to fetch namespace or machine: %s", err),
|
||||
fmt.Sprintf(
|
||||
"Error moving node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.UnshareMachineRequest{
|
||||
MachineId: machine.Id,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
|
||||
response, err := client.UnshareMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error unsharing node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Node unshared", output)
|
||||
SuccessOutput(moveResponse.Machine, "Node moved to another namespace", output)
|
||||
},
|
||||
}
|
||||
|
||||
func nodesToPtables(
|
||||
currentNamespace string,
|
||||
showTags bool,
|
||||
machines []*v1.Machine,
|
||||
) (pterm.TableData, error) {
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Name",
|
||||
"NodeKey",
|
||||
"Namespace",
|
||||
"IP address",
|
||||
"Ephemeral",
|
||||
"Last seen",
|
||||
"Online",
|
||||
"Expired",
|
||||
},
|
||||
tableHeader := []string{
|
||||
"ID",
|
||||
"Hostname",
|
||||
"Name",
|
||||
"NodeKey",
|
||||
"Namespace",
|
||||
"IP addresses",
|
||||
"Ephemeral",
|
||||
"Last seen",
|
||||
"Online",
|
||||
"Expired",
|
||||
}
|
||||
if showTags {
|
||||
tableHeader = append(tableHeader, []string{
|
||||
"ForcedTags",
|
||||
"InvalidTags",
|
||||
"ValidTags",
|
||||
}...)
|
||||
}
|
||||
tableData := pterm.TableData{tableHeader}
|
||||
|
||||
for _, machine := range machines {
|
||||
var ephemeral bool
|
||||
@@ -509,6 +526,26 @@ func nodesToPtables(
|
||||
expired = pterm.LightRed("yes")
|
||||
}
|
||||
|
||||
var forcedTags string
|
||||
for _, tag := range machine.ForcedTags {
|
||||
forcedTags += "," + tag
|
||||
}
|
||||
forcedTags = strings.TrimLeft(forcedTags, ",")
|
||||
var invalidTags string
|
||||
for _, tag := range machine.InvalidTags {
|
||||
if !contains(machine.ForcedTags, tag) {
|
||||
invalidTags += "," + pterm.LightRed(tag)
|
||||
}
|
||||
}
|
||||
invalidTags = strings.TrimLeft(invalidTags, ",")
|
||||
var validTags string
|
||||
for _, tag := range machine.ValidTags {
|
||||
if !contains(machine.ForcedTags, tag) {
|
||||
validTags += "," + pterm.LightGreen(tag)
|
||||
}
|
||||
}
|
||||
validTags = strings.TrimLeft(validTags, ",")
|
||||
|
||||
var namespace string
|
||||
if currentNamespace == "" || (currentNamespace == machine.Namespace.Name) {
|
||||
namespace = pterm.LightMagenta(machine.Namespace.Name)
|
||||
@@ -516,21 +553,95 @@ func nodesToPtables(
|
||||
// Shared into this namespace
|
||||
namespace = pterm.LightYellow(machine.Namespace.Name)
|
||||
}
|
||||
|
||||
var IPV4Address string
|
||||
var IPV6Address string
|
||||
for _, addr := range machine.IpAddresses {
|
||||
if netaddr.MustParseIP(addr).Is4() {
|
||||
IPV4Address = addr
|
||||
} else {
|
||||
IPV6Address = addr
|
||||
}
|
||||
}
|
||||
|
||||
nodeData := []string{
|
||||
strconv.FormatUint(machine.Id, headscale.Base10),
|
||||
machine.Name,
|
||||
machine.GetGivenName(),
|
||||
nodeKey.ShortString(),
|
||||
namespace,
|
||||
strings.Join([]string{IPV4Address, IPV6Address}, ", "),
|
||||
strconv.FormatBool(ephemeral),
|
||||
lastSeenTime,
|
||||
online,
|
||||
expired,
|
||||
}
|
||||
if showTags {
|
||||
nodeData = append(nodeData, []string{forcedTags, invalidTags, validTags}...)
|
||||
}
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(machine.Id, headscale.Base10),
|
||||
machine.Name,
|
||||
nodeKey.ShortString(),
|
||||
namespace,
|
||||
machine.IpAddress,
|
||||
strconv.FormatBool(ephemeral),
|
||||
lastSeenTime,
|
||||
online,
|
||||
expired,
|
||||
},
|
||||
nodeData,
|
||||
)
|
||||
}
|
||||
|
||||
return tableData, nil
|
||||
}
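To illustrate the new IP-address handling above, a minimal sketch of the IPv4/IPv6 split used for the "IP addresses" column. The sample addresses are placeholders, not values from the repository.

package main

import (
	"fmt"
	"strings"

	"inet.af/netaddr"
)

func main() {
	// Stand-ins for machine.IpAddresses above.
	addrs := []string{"100.64.0.1", "fd7a:115c:a1e0::1"}

	// Mirrors nodesToPtables: the first IPv4 and the first IPv6 address are
	// joined into a single column value.
	var ipv4, ipv6 string
	for _, addr := range addrs {
		if netaddr.MustParseIP(addr).Is4() {
			ipv4 = addr
		} else {
			ipv6 = addr
		}
	}

	fmt.Println(strings.Join([]string{ipv4, ipv6}, ", "))
}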
|
||||
|
||||
var tagCmd = &cobra.Command{
|
||||
Use: "tag",
|
||||
Short: "Manage the tags of a node",
|
||||
Aliases: []string{"tags", "t"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
// retrieve flags from CLI
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
tagsToSet, err := cmd.Flags().GetStringSlice("tags")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error retrieving list of tags to add to machine, %v", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Sending tags to machine
|
||||
request := &v1.SetTagsRequest{
|
||||
MachineId: identifier,
|
||||
Tags: tagsToSet,
|
||||
}
|
||||
resp, err := client.SetTags(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error while sending tags to headscale: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if resp != nil {
|
||||
SuccessOutput(
|
||||
resp.GetMachine(),
|
||||
"Machine updated",
|
||||
output,
|
||||
)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/prometheus/common/model"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -13,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultPreAuthKeyExpiry = 1 * time.Hour
|
||||
DefaultPreAuthKeyExpiry = "1h"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -31,17 +32,19 @@ func init() {
|
||||
createPreAuthKeyCmd.PersistentFlags().
|
||||
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().
|
||||
DurationP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
StringP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (e.g. 30m, 24h)")
|
||||
}
|
||||
|
||||
var preauthkeysCmd = &cobra.Command{
|
||||
Use: "preauthkeys",
|
||||
Short: "Handle the preauthkeys in Headscale",
|
||||
Use: "preauthkeys",
|
||||
Short: "Handle the preauthkeys in Headscale",
|
||||
Aliases: []string{"preauthkey", "authkey", "pre"},
|
||||
}
|
||||
|
||||
var listPreAuthKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the preauthkeys for this namespace",
|
||||
Use: "list",
|
||||
Short: "List the preauthkeys for this namespace",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -83,7 +86,7 @@ var listPreAuthKeys = &cobra.Command{
|
||||
for _, key := range response.PreAuthKeys {
|
||||
expiration := "-"
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = key.Expiration.AsTime().Format("2006-01-02 15:04:05")
|
||||
expiration = ColourTime(key.Expiration.AsTime())
|
||||
}
|
||||
|
||||
var reusable string
|
||||
@@ -118,8 +121,9 @@ var listPreAuthKeys = &cobra.Command{
|
||||
}
|
||||
|
||||
var createPreAuthKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a new preauthkey in the specified namespace",
|
||||
Use: "create",
|
||||
Short: "Creates a new preauthkey in the specified namespace",
|
||||
Aliases: []string{"c", "new"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -145,10 +149,22 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
Ephemeral: ephemeral,
|
||||
}
|
||||
|
||||
duration, _ := cmd.Flags().GetDuration("expiration")
|
||||
expiration := time.Now().UTC().Add(duration)
|
||||
durationStr, _ := cmd.Flags().GetString("expiration")
|
||||
|
||||
log.Trace().Dur("expiration", duration).Msg("expiration has been set")
|
||||
duration, err := model.ParseDuration(durationStr)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Could not parse duration: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
expiration := time.Now().UTC().Add(time.Duration(duration))
|
||||
|
||||
log.Trace().Dur("expiration", time.Duration(duration)).Msg("expiration has been set")
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
@@ -172,8 +188,9 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var expirePreAuthKeyCmd = &cobra.Command{
|
||||
Use: "expire KEY",
|
||||
Short: "Expire a preauthkey",
|
||||
Use: "expire KEY",
|
||||
Short: "Expire a preauthkey",
|
||||
Aliases: []string{"revoke", "exp", "e"},
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return errMissingParameter
|
||||
|
19
cmd/headscale/cli/pterm_style.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pterm/pterm"
|
||||
)
|
||||
|
||||
func ColourTime(date time.Time) string {
|
||||
dateStr := date.Format("2006-01-02 15:04:05")
|
||||
|
||||
if date.After(time.Now()) {
|
||||
dateStr = pterm.LightGreen(dateStr)
|
||||
} else {
|
||||
dateStr = pterm.LightRed(dateStr)
|
||||
}
|
||||
|
||||
return dateStr
|
||||
}
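A small self-contained usage sketch of the helper above; the function is reproduced locally (as colourTime) so the example compiles outside the cli package.

package main

import (
	"fmt"
	"time"

	"github.com/pterm/pterm"
)

// colourTime mirrors ColourTime above: timestamps in the future render green,
// timestamps in the past render red.
func colourTime(date time.Time) string {
	dateStr := date.Format("2006-01-02 15:04:05")
	if date.After(time.Now()) {
		return pterm.LightGreen(dateStr)
	}

	return pterm.LightRed(dateStr)
}

func main() {
	fmt.Println(colourTime(time.Now().Add(-time.Hour))) // already expired -> red
	fmt.Println(colourTime(time.Now().Add(time.Hour)))  // still valid -> green
}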
|
@@ -3,17 +3,75 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tcnksm/go-latest"
|
||||
)
|
||||
|
||||
var cfgFile string = ""
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(initConfig)
|
||||
rootCmd.PersistentFlags().
|
||||
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
|
||||
rootCmd.PersistentFlags().
|
||||
StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
|
||||
rootCmd.PersistentFlags().
|
||||
Bool("force", false, "Disable prompts and forces the execution")
|
||||
}
|
||||
|
||||
func initConfig() {
|
||||
if cfgFile != "" {
|
||||
err := headscale.LoadConfig(cfgFile, true)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err)
|
||||
}
|
||||
} else {
|
||||
err := headscale.LoadConfig("", false)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err)
|
||||
}
|
||||
}
|
||||
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err)
|
||||
}
|
||||
|
||||
machineOutput := HasMachineOutputFlag()
|
||||
|
||||
zerolog.SetGlobalLevel(cfg.LogLevel)
|
||||
|
||||
// If the user has requested a "machine" readable format,
|
||||
// then disable login so the output remains valid.
|
||||
if machineOutput {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
if !cfg.DisableUpdateCheck && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
Version != "dev" {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
fmt.Printf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "headscale",
|
||||
Short: "headscale - a Tailscale control server",
|
||||
|
@@ -24,6 +24,8 @@ func init() {
|
||||
enableRouteCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to enable")
|
||||
enableRouteCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
enableRouteCmd.Flags().BoolP("all", "a", false, "All routes from host")
|
||||
|
||||
err = enableRouteCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
@@ -35,13 +37,15 @@ func init() {
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
Use: "routes",
|
||||
Short: "Manage the routes of Headscale",
|
||||
Use: "routes",
|
||||
Short: "Manage the routes of Headscale",
|
||||
Aliases: []string{"r", "route"},
|
||||
}
|
||||
|
||||
var listRoutesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List routes advertised and enabled by a given node",
|
||||
Use: "list",
|
||||
Short: "List routes advertised and enabled by a given node",
|
||||
Aliases: []string{"ls", "show"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
@@ -123,21 +127,43 @@ omit the route you do not want to enable.
|
||||
return
|
||||
}
|
||||
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
var routes []string
|
||||
|
||||
isAll, _ := cmd.Flags().GetBool("all")
|
||||
if isAll {
|
||||
response, err := client.GetMachineRoute(ctx, &v1.GetMachineRouteRequest{
|
||||
MachineId: machineID,
|
||||
})
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot get machine routes: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
routes = response.GetRoutes().GetAdvertisedRoutes()
|
||||
} else {
|
||||
routes, err = cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
request := &v1.EnableMachineRoutesRequest{
|
||||
MachineId: machineID,
|
||||
Routes: routes,
|
||||
|
@@ -1,8 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -17,14 +16,14 @@ var serveCmd = &cobra.Command{
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
h, err := getHeadscaleApp()
|
||||
app, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
}
|
||||
|
||||
err = h.Serve()
|
||||
err = app.Serve()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
log.Fatal().Caller().Err(err).Msg("Error starting server")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
@@ -2,298 +2,31 @@ package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
"reflect"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"gopkg.in/yaml.v2"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
func LoadConfig(path string) error {
|
||||
viper.SetConfigName("config")
|
||||
if path == "" {
|
||||
viper.AddConfigPath("/etc/headscale/")
|
||||
viper.AddConfigPath("$HOME/.headscale")
|
||||
viper.AddConfigPath(".")
|
||||
} else {
|
||||
// For testing
|
||||
viper.AddConfigPath(path)
|
||||
}
|
||||
|
||||
viper.SetEnvPrefix("headscale")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", "HTTP-01")
|
||||
|
||||
viper.SetDefault("ip_prefix", "100.64.0.0/10")
|
||||
|
||||
viper.SetDefault("log_level", "info")
|
||||
|
||||
viper.SetDefault("dns_config", nil)
|
||||
|
||||
viper.SetDefault("unix_socket", "/var/run/headscale.sock")
|
||||
|
||||
viper.SetDefault("cli.insecure", false)
|
||||
viper.SetDefault("cli.timeout", "5s")
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return fmt.Errorf("fatal error reading config file: %w", err)
|
||||
}
|
||||
|
||||
// Collect any validation errors and return them all at once
|
||||
var errorText string
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") &&
|
||||
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
|
||||
log.Warn().
|
||||
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
|
||||
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") &&
|
||||
!strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
errorText += "Fatal config error: server_url must start with https:// or http://\n"
|
||||
}
|
||||
if errorText != "" {
|
||||
//nolint
|
||||
return errors.New(strings.TrimSuffix(errorText, "\n"))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func GetDERPConfig() headscale.DERPConfig {
|
||||
urlStrs := viper.GetStringSlice("derp.urls")
|
||||
|
||||
urls := make([]url.URL, len(urlStrs))
|
||||
for index, urlStr := range urlStrs {
|
||||
urlAddr, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("url", urlStr).
|
||||
Err(err).
|
||||
Msg("Failed to parse url, ignoring...")
|
||||
}
|
||||
|
||||
urls[index] = *urlAddr
|
||||
}
|
||||
|
||||
paths := viper.GetStringSlice("derp.paths")
|
||||
|
||||
autoUpdate := viper.GetBool("derp.auto_update_enabled")
|
||||
updateFrequency := viper.GetDuration("derp.update_frequency")
|
||||
|
||||
return headscale.DERPConfig{
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
if viper.IsSet("dns_config") {
|
||||
dnsConfig := &tailcfg.DNSConfig{}
|
||||
|
||||
if viper.IsSet("dns_config.nameservers") {
|
||||
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
|
||||
|
||||
nameservers := make([]netaddr.IP, len(nameserversStr))
|
||||
resolvers := make([]dnstype.Resolver, len(nameserversStr))
|
||||
|
||||
for index, nameserverStr := range nameserversStr {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
|
||||
nameservers[index] = nameserver
|
||||
resolvers[index] = dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
|
||||
dnsConfig.Nameservers = nameservers
|
||||
dnsConfig.Resolvers = resolvers
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.restricted_nameservers") {
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Routes = make(map[string][]dnstype.Resolver)
|
||||
restrictedDNS := viper.GetStringMapStringSlice(
|
||||
"dns_config.restricted_nameservers",
|
||||
)
|
||||
for domain, restrictedNameservers := range restrictedDNS {
|
||||
restrictedResolvers := make(
|
||||
[]dnstype.Resolver,
|
||||
len(restrictedNameservers),
|
||||
)
|
||||
for index, nameserverStr := range restrictedNameservers {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
restrictedResolvers[index] = dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
dnsConfig.Routes[domain] = restrictedResolvers
|
||||
}
|
||||
} else {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.domains") {
|
||||
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.magic_dns") {
|
||||
magicDNS := viper.GetBool("dns_config.magic_dns")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Proxied = magicDNS
|
||||
} else if magicDNS {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
|
||||
}
|
||||
}
|
||||
|
||||
var baseDomain string
|
||||
if viper.IsSet("dns_config.base_domain") {
|
||||
baseDomain = viper.GetString("dns_config.base_domain")
|
||||
} else {
|
||||
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
|
||||
}
|
||||
|
||||
return dnsConfig, baseDomain
|
||||
}
|
||||
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
func absPath(path string) string {
|
||||
// If a relative path is provided, prefix it with the directory where
|
||||
// the config file was found.
|
||||
if (path != "") && !strings.HasPrefix(path, string(os.PathSeparator)) {
|
||||
dir, _ := filepath.Split(viper.ConfigFileUsed())
|
||||
if dir != "" {
|
||||
path = filepath.Join(dir, path)
|
||||
}
|
||||
}
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
func getHeadscaleConfig() headscale.Config {
|
||||
dnsConfig, baseDomain := GetDNSConfig()
|
||||
derpConfig := GetDERPConfig()
|
||||
|
||||
return headscale.Config{
|
||||
ServerURL: viper.GetString("server_url"),
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
IPPrefix: netaddr.MustParseIPPrefix(viper.GetString("ip_prefix")),
|
||||
PrivateKeyPath: absPath(viper.GetString("private_key_path")),
|
||||
BaseDomain: baseDomain,
|
||||
|
||||
DERP: derpConfig,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration(
|
||||
"ephemeral_node_inactivity_timeout",
|
||||
),
|
||||
|
||||
DBtype: viper.GetString("db_type"),
|
||||
DBpath: absPath(viper.GetString("db_path")),
|
||||
DBhost: viper.GetString("db_host"),
|
||||
DBport: viper.GetInt("db_port"),
|
||||
DBname: viper.GetString("db_name"),
|
||||
DBuser: viper.GetString("db_user"),
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
|
||||
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
|
||||
TLSLetsEncryptCacheDir: absPath(
|
||||
viper.GetString("tls_letsencrypt_cache_dir"),
|
||||
),
|
||||
TLSLetsEncryptChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
|
||||
|
||||
TLSCertPath: absPath(viper.GetString("tls_cert_path")),
|
||||
TLSKeyPath: absPath(viper.GetString("tls_key_path")),
|
||||
|
||||
DNSConfig: dnsConfig,
|
||||
|
||||
ACMEEmail: viper.GetString("acme_email"),
|
||||
ACMEURL: viper.GetString("acme_url"),
|
||||
|
||||
UnixSocket: viper.GetString("unix_socket"),
|
||||
|
||||
OIDC: headscale.OIDCConfig{
|
||||
Issuer: viper.GetString("oidc.issuer"),
|
||||
ClientID: viper.GetString("oidc.client_id"),
|
||||
ClientSecret: viper.GetString("oidc.client_secret"),
|
||||
},
|
||||
|
||||
CLI: headscale.CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
Timeout: viper.GetDuration("cli.timeout"),
|
||||
},
|
||||
}
|
||||
}
|
||||
const (
|
||||
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
|
||||
)
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
// Minimum inactivity timeout is the keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
// TODO: Find a better way to return this text
|
||||
//nolint
|
||||
err := fmt.Errorf(
|
||||
"ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s",
|
||||
viper.GetString("ephemeral_node_inactivity_timeout"),
|
||||
minInactivityTimeout,
|
||||
)
|
||||
|
||||
return nil, err
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load configuration while creating headscale instance: %w", err)
|
||||
}
|
||||
|
||||
cfg := getHeadscaleConfig()
|
||||
|
||||
cfg.OIDC.MatchMap = loadOIDCMatchMap()
|
||||
|
||||
app, err := headscale.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -301,11 +34,11 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
|
||||
// We are doing this here, as in the future could be cool to have it also hot-reload
|
||||
|
||||
if viper.GetString("acl_policy_path") != "" {
|
||||
aclPath := absPath(viper.GetString("acl_policy_path"))
|
||||
if cfg.ACL.PolicyPath != "" {
|
||||
aclPath := headscale.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath)
|
||||
err = app.LoadACLPolicy(aclPath)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
log.Fatal().
|
||||
Str("path", aclPath).
|
||||
Err(err).
|
||||
Msg("Could not load the ACL policy")
|
||||
@@ -316,7 +49,14 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
}
|
||||
|
||||
func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
|
||||
cfg := getHeadscaleConfig()
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
Caller().
|
||||
Msgf("Failed to load configuration")
|
||||
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Dur("timeout", cfg.CLI.Timeout).
|
||||
@@ -340,14 +80,14 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(headscale.GrpcSocketDialer),
|
||||
)
|
||||
} else {
|
||||
// If we are not connecting to a local server, require an API key for authentication
|
||||
apiKey := cfg.CLI.APIKey
|
||||
if apiKey == "" {
|
||||
log.Fatal().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
|
||||
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
|
||||
}
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithPerRPCCredentials(tokenAuth{
|
||||
@@ -356,14 +96,28 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
)
|
||||
|
||||
if cfg.CLI.Insecure {
|
||||
grpcOptions = append(grpcOptions, grpc.WithInsecure())
|
||||
tlsConfig := &tls.Config{
|
||||
// turn of gosec as we are intentionally setting
|
||||
// insecure.
|
||||
//nolint:gosec
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
|
||||
)
|
||||
} else {
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
|
||||
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msgf("Could not connect: %v", err)
|
||||
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
|
||||
os.Exit(-1) // we get here if logging is suppressed (i.e., json output)
|
||||
}
|
||||
|
||||
client := v1.NewHeadscaleServiceClient(conn)
|
||||
@@ -372,21 +126,21 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
}
|
||||
|
||||
func SuccessOutput(result interface{}, override string, outputFormat string) {
|
||||
var j []byte
|
||||
var jsonBytes []byte
|
||||
var err error
|
||||
switch outputFormat {
|
||||
case "json":
|
||||
j, err = json.MarshalIndent(result, "", "\t")
|
||||
jsonBytes, err = json.MarshalIndent(result, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
case "json-line":
|
||||
j, err = json.Marshal(result)
|
||||
jsonBytes, err = json.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
case "yaml":
|
||||
j, err = yaml.Marshal(result)
|
||||
jsonBytes, err = yaml.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
@@ -398,7 +152,7 @@ func SuccessOutput(result interface{}, override string, outputFormat string) {
|
||||
}
|
||||
|
||||
//nolint
|
||||
fmt.Println(string(j))
|
||||
fmt.Println(string(jsonBytes))
|
||||
}
|
||||
|
||||
func ErrorOutput(errResult error, override string, outputFormat string) {
|
||||
@@ -437,14 +191,12 @@ func (tokenAuth) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// loadOIDCMatchMap is a wrapper around viper to verify that the keys in
|
||||
// the match map are valid regex strings.
|
||||
func loadOIDCMatchMap() map[string]string {
|
||||
strMap := viper.GetStringMapString("oidc.domain_map")
|
||||
|
||||
for oidcMatcher := range strMap {
|
||||
_ = regexp.MustCompile(oidcMatcher)
|
||||
func contains[T string](ts []T, t T) bool {
|
||||
for _, v := range ts {
|
||||
if reflect.DeepEqual(v, t) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return strMap
|
||||
return false
|
||||
}
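A brief, self-contained usage sketch of the generic contains helper added above (the tag values are made up for illustration; Go 1.18+ is required for type parameters).

package main

import (
	"fmt"
	"reflect"
)

// Same shape as the contains helper above, reproduced here so the sketch
// compiles on its own.
func contains[T string](ts []T, t T) bool {
	for _, v := range ts {
		if reflect.DeepEqual(v, t) {
			return true
		}
	}

	return false
}

func main() {
	forcedTags := []string{"tag:prod", "tag:server"}

	// Mirrors the tag filtering in nodesToPtables: a valid tag is only shown
	// when it is not already forced.
	fmt.Println(contains(forcedTags, "tag:prod"))  // true
	fmt.Println(contains(forcedTags, "tag:debug")) // false
}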
|
||||
|
@@ -1,17 +1,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/efekarakus/termcolor"
|
||||
"github.com/juanfont/headscale/cmd/headscale/cli"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/tcnksm/go-latest"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -43,44 +39,5 @@ func main() {
|
||||
NoColor: !colors,
|
||||
})
|
||||
|
||||
if err := cli.LoadConfig(""); err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
|
||||
machineOutput := cli.HasMachineOutputFlag()
|
||||
|
||||
logLevel := viper.GetString("log_level")
|
||||
level, err := zerolog.ParseLevel(logLevel)
|
||||
if err != nil {
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
} else {
|
||||
zerolog.SetGlobalLevel(level)
|
||||
}
|
||||
|
||||
// If the user has requested a "machine" readable format,
|
||||
// then disable logging so the output remains valid.
|
||||
if machineOutput {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
if !viper.GetBool("disable_check_updates") && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
cli.Version != "dev" {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, cli.Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
fmt.Printf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
cli.Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cli.Execute()
|
||||
}
|
||||
|
@@ -1,13 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale/cmd/headscale/cli"
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
@@ -26,6 +27,51 @@ func (s *Suite) SetUpSuite(c *check.C) {
|
||||
func (s *Suite) TearDownSuite(c *check.C) {
|
||||
}
|
||||
|
||||
func (*Suite) TestConfigFileLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
cfgFile := filepath.Join(tmpDir, "config.yaml")
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(
|
||||
filepath.Clean(path+"/../../config-example.yaml"),
|
||||
cfgFile,
|
||||
)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = headscale.LoadConfig(cfgFile, true)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
c.Assert(
|
||||
headscale.GetFileMode("unix_socket_permission"),
|
||||
check.Equals,
|
||||
fs.FileMode(0o770),
|
||||
)
|
||||
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
|
||||
}
|
||||
|
||||
func (*Suite) TestConfigLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
@@ -48,19 +94,26 @@ func (*Suite) TestConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetStringSlice("derp.paths")[0], check.Equals, "derp-example.yaml")
|
||||
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "db.sqlite")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
c.Assert(
|
||||
headscale.GetFileMode("unix_socket_permission"),
|
||||
check.Equals,
|
||||
fs.FileMode(0o770),
|
||||
)
|
||||
c.Assert(viper.GetBool("logtail.enabled"), check.Equals, false)
|
||||
c.Assert(viper.GetBool("randomize_client_port"), check.Equals, false)
|
||||
}
|
||||
|
||||
func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
@@ -85,10 +138,10 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig, baseDomain := cli.GetDNSConfig()
|
||||
dnsConfig, baseDomain := headscale.GetDNSConfig()
|
||||
|
||||
c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
|
||||
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
|
||||
@@ -118,7 +171,7 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
|
||||
// Check configuration validation errors (1)
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.NotNil)
|
||||
// check.Matches can not handle multiline strings
|
||||
tmp := strings.ReplaceAll(err.Error(), "\n", "***")
|
||||
@@ -143,6 +196,6 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
"---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"",
|
||||
)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
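The TLS validation test above relies on a writeConfig helper whose body is not part of this diff. A minimal sketch of what such a helper could look like, assuming it only needs to drop the YAML blob into <tmpDir>/config.yaml (the file name LoadConfig searches for), is:

// Hypothetical sketch of the writeConfig helper used above; the real
// implementation is not shown in this diff. It assumes configYaml is a
// []byte and that the imports already present in this test file suffice.
func writeConfig(c *check.C, tmpDir string, configYaml []byte) {
	if err := ioutil.WriteFile(
		filepath.Join(tmpDir, "config.yaml"),
		configYaml,
		0o600,
	); err != nil {
		c.Fatal(err)
	}
}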
|
||||
|
@@ -1,39 +1,117 @@
|
||||
---
|
||||
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
|
||||
#
|
||||
# - `/etc/headscale`
|
||||
# - `~/.headscale`
|
||||
# - current working directory
|
||||
|
||||
# The url clients will connect to.
|
||||
# Typically this will be a domain.
|
||||
# Typically this will be a domain like:
|
||||
#
|
||||
# https://myheadscale.example.com:443
|
||||
#
|
||||
server_url: http://127.0.0.1:8080
|
||||
|
||||
# Address to listen to / bind to on the server
|
||||
#
|
||||
listen_addr: 0.0.0.0:8080
|
||||
|
||||
# Private key file which will be
|
||||
# autogenerated if it's missing
|
||||
private_key_path: private.key
|
||||
# Address to listen on for /metrics. You may want
# to keep this endpoint private to your internal
# network
|
||||
#
|
||||
metrics_listen_addr: 127.0.0.1:9090
|
||||
|
||||
# Address to listen for gRPC.
|
||||
# gRPC is used for controlling a headscale server
|
||||
# remotely with the CLI
|
||||
# Note: Remote access _only_ works if you have
|
||||
# valid certificates.
|
||||
grpc_listen_addr: 0.0.0.0:50443
|
||||
|
||||
# Allow the gRPC admin interface to run in INSECURE
|
||||
# mode. This is not recommended as the traffic will
|
||||
# be unencrypted. Only enable if you know what you
|
||||
# are doing.
|
||||
grpc_allow_insecure: false
|
||||
|
||||
# Private key used encrypt the traffic between headscale
|
||||
# and Tailscale clients.
|
||||
# The private key file which will be
|
||||
# autogenerated if it's missing
|
||||
private_key_path: /var/lib/headscale/private.key
|
||||
|
||||
# List of IP prefixes to allocate tailaddresses from.
|
||||
# Each prefix consists of either an IPv4 or IPv6 address,
|
||||
# and the associated prefix length, delimited by a slash.
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
|
||||
# DERP is a relay system that Tailscale uses when a direct
|
||||
# connection cannot be established.
|
||||
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
|
||||
#
|
||||
# headscale needs a list of DERP servers that can be presented
|
||||
# to the clients.
|
||||
derp:
|
||||
server:
|
||||
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
|
||||
# The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
|
||||
enabled: false
|
||||
|
||||
# Region ID to use for the embedded DERP server.
|
||||
# The local DERP prevails if the region ID collides with another region ID coming from
|
||||
# the regular DERP config.
|
||||
region_id: 999
|
||||
|
||||
# Region code and name are displayed in the Tailscale UI to identify a DERP region
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
|
||||
# Listens on UDP at the configured address for STUN connections to help with NAT traversal.
|
||||
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
|
||||
#
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
stun_listen_addr: "0.0.0.0:3478"
|
||||
|
||||
# List of externally available DERP maps encoded in JSON
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
|
||||
# Locally available DERP map files encoded in YAML
|
||||
paths:
|
||||
- derp-example.yaml
|
||||
#
|
||||
# This option is mostly interesting for people hosting
|
||||
# their own DERP servers:
|
||||
# https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
#
|
||||
# paths:
|
||||
# - /etc/headscale/derp-example.yaml
|
||||
paths: []
|
||||
|
||||
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap.
|
||||
auto_update_enabled: true
|
||||
|
||||
# How often should we check for updates?
|
||||
# How often should we check for DERP updates?
|
||||
update_frequency: 24h
|
||||
|
||||
# Disables the automatic check for updates on startup
|
||||
# Disables the automatic check for headscale updates on startup
|
||||
disable_check_updates: false
|
||||
|
||||
# Time before an inactive ephemeral node is deleted?
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
|
||||
# Period to check for node updates in the tailnet. A value too low will severely affect
# CPU consumption of Headscale. A value too high (over 60s) will cause problems
# for the nodes, as they won't get updates or keepalive messages in time.
# If in doubt, do not touch the default 10s.
|
||||
node_update_check_interval: 10s
|
||||
|
||||
# SQLite config
|
||||
db_type: sqlite3
|
||||
db_path: db.sqlite
|
||||
db_path: /var/lib/headscale/db.sqlite
|
||||
|
||||
# # Postgres config
|
||||
# db_type: postgres
|
||||
@@ -43,33 +121,96 @@ db_path: db.sqlite
|
||||
# db_user: foo
|
||||
# db_pass: bar
|
||||
|
||||
### TLS configuration
|
||||
#
|
||||
## Let's Encrypt / ACME
|
||||
#
|
||||
# headscale supports automatically requesting and setting up
|
||||
# TLS for a domain with Let's Encrypt.
|
||||
#
|
||||
# URL to ACME directory
|
||||
acme_url: https://acme-v02.api.letsencrypt.org/directory
|
||||
|
||||
# Email to register with ACME provider
|
||||
acme_email: ""
|
||||
|
||||
# Domain name to request a TLS certificate for:
|
||||
tls_letsencrypt_hostname: ""
|
||||
tls_letsencrypt_listen: ":http"
|
||||
tls_letsencrypt_cache_dir: ".cache"
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
|
||||
# Client (Tailscale/Browser) authentication mode (mTLS)
|
||||
# Acceptable values:
|
||||
# - disabled: client authentication disabled
|
||||
# - relaxed: client certificate is required but not verified
|
||||
# - enforced: client certificate is required and verified
|
||||
tls_client_auth_mode: relaxed
|
||||
|
||||
# Path to store certificates and metadata needed by
|
||||
# letsencrypt
|
||||
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
|
||||
|
||||
# Type of ACME challenge to use, currently supported types:
|
||||
# HTTP-01 or TLS-ALPN-01
|
||||
# See [docs/tls.md](docs/tls.md) for more information
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
# When HTTP-01 challenge is chosen, letsencrypt must set up a
|
||||
# verification endpoint, and it will be listening on:
|
||||
# :http = port 80
|
||||
tls_letsencrypt_listen: ":http"
|
||||
|
||||
## Use already defined certificates:
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
log_level: info
|
||||
|
||||
# Path to a file containing ACL policies.
|
||||
# ACLs can be defined as YAML or HUJSON.
|
||||
# https://tailscale.com/kb/1018/acls/
|
||||
acl_policy_path: ""
|
||||
|
||||
## DNS
|
||||
#
|
||||
# headscale supports Tailscale's DNS configuration and MagicDNS.
|
||||
# Please have a look to their KB to better understand the concepts:
|
||||
#
|
||||
# - https://tailscale.com/kb/1054/dns/
|
||||
# - https://tailscale.com/kb/1081/magicdns/
|
||||
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
|
||||
#
|
||||
dns_config:
|
||||
# Upstream DNS servers
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
|
||||
# Split DNS (see https://tailscale.com/kb/1054/dns/),
|
||||
# list of search domains and the DNS to query for each one.
|
||||
#
|
||||
# restricted_nameservers:
|
||||
# foo.bar.com:
|
||||
# - 1.1.1.1
|
||||
# darp.headscale.net:
|
||||
# - 1.1.1.1
|
||||
# - 8.8.8.8
|
||||
|
||||
# Search domains to inject.
|
||||
domains: []
|
||||
|
||||
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
|
||||
# Only works if there is at least a nameserver defined.
|
||||
magic_dns: true
|
||||
|
||||
# Defines the base domain to create the hostnames for MagicDNS.
|
||||
# `base_domain` must be an FQDN, without the trailing dot.
|
||||
# The FQDN of the hosts will be
|
||||
# `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
|
||||
base_domain: example.com
|
||||
|
||||
# Unix socket used for the CLI to connect without authentication
|
||||
# Note: for local development, you probably want to change this to:
|
||||
# unix_socket: ./headscale.sock
|
||||
unix_socket: /var/run/headscale.sock
|
||||
unix_socket_permission: "0770"
|
||||
#
|
||||
# headscale supports experimental OpenID connect support,
|
||||
# it is still being tested and might have some bugs, please
|
||||
# help us test it.
|
||||
@@ -79,7 +220,38 @@ unix_socket: /var/run/headscale.sock
|
||||
# client_id: "your-oidc-client-id"
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
#
|
||||
# # Domain map is used to map incoming users (by their email) to
|
||||
# # a namespace. The key can be a string, or regex.
|
||||
# domain_map:
|
||||
# ".*": default-namespace
|
||||
# Customize the scopes used in the OIDC flow and add custom query
# parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
#
|
||||
# scope: ["openid", "profile", "email", "custom"]
|
||||
# extra_params:
|
||||
# domain_hint: example.com
|
||||
#
|
||||
# List allowed principal domains and/or users. If an authenticated user's domain is not in this list, the
|
||||
# authentication request will be rejected.
|
||||
#
|
||||
# allowed_domains:
|
||||
# - example.com
|
||||
# allowed_users:
|
||||
# - alice@example.com
|
||||
#
|
||||
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# This will transform `first-name.last-name@example.com` to the namespace `first-name.last-name`.
# If `strip_email_domain` is set to `false`, the domain part will NOT be removed, resulting in the following
# namespace: `first-name.last-name.example.com`
|
||||
#
|
||||
# strip_email_domain: true
|
||||
|
||||
# Logtail configuration
|
||||
# Logtail is Tailscale's logging and auditing infrastructure; it allows the control panel
|
||||
# to instruct tailscale nodes to log their activity to a remote server.
|
||||
logtail:
|
||||
# Enable logtail for this headscale's clients.
|
||||
# As there is currently no support for overriding the log server in headscale, this is
|
||||
# disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
|
||||
enabled: false
|
||||
|
||||
# Enabling this option makes devices prefer a random port for WireGuard traffic over the
|
||||
# default static port 41641. This option is intended as a workaround for some buggy
|
||||
# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
|
||||
randomize_client_port: false
|
||||
|
config.go (new file, 543 lines)
@@ -0,0 +1,543 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
const (
|
||||
tlsALPN01ChallengeType = "TLS-ALPN-01"
|
||||
http01ChallengeType = "HTTP-01"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration.
|
||||
type Config struct {
|
||||
ServerURL string
|
||||
Addr string
|
||||
MetricsAddr string
|
||||
GRPCAddr string
|
||||
GRPCAllowInsecure bool
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
NodeUpdateCheckInterval time.Duration
|
||||
IPPrefixes []netaddr.IPPrefix
|
||||
PrivateKeyPath string
|
||||
BaseDomain string
|
||||
LogLevel zerolog.Level
|
||||
DisableUpdateCheck bool
|
||||
|
||||
DERP DERPConfig
|
||||
|
||||
DBtype string
|
||||
DBpath string
|
||||
DBhost string
|
||||
DBport int
|
||||
DBname string
|
||||
DBuser string
|
||||
DBpass string
|
||||
|
||||
TLS TLSConfig
|
||||
|
||||
ACMEURL string
|
||||
ACMEEmail string
|
||||
|
||||
DNSConfig *tailcfg.DNSConfig
|
||||
|
||||
UnixSocket string
|
||||
UnixSocketPermission fs.FileMode
|
||||
|
||||
OIDC OIDCConfig
|
||||
|
||||
LogTail LogTailConfig
|
||||
RandomizeClientPort bool
|
||||
|
||||
CLI CLIConfig
|
||||
|
||||
ACL ACLConfig
|
||||
}
|
||||
|
||||
type TLSConfig struct {
|
||||
CertPath string
|
||||
KeyPath string
|
||||
ClientAuthMode tls.ClientAuthType
|
||||
|
||||
LetsEncrypt LetsEncryptConfig
|
||||
}
|
||||
|
||||
type LetsEncryptConfig struct {
|
||||
Listen string
|
||||
Hostname string
|
||||
CacheDir string
|
||||
ChallengeType string
|
||||
}
|
||||
|
||||
type OIDCConfig struct {
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
Scope []string
|
||||
ExtraParams map[string]string
|
||||
AllowedDomains []string
|
||||
AllowedUsers []string
|
||||
StripEmaildomain bool
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
ServerEnabled bool
|
||||
ServerRegionID int
|
||||
ServerRegionCode string
|
||||
ServerRegionName string
|
||||
STUNAddr string
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
}
|
||||
|
||||
type LogTailConfig struct {
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
Address string
|
||||
APIKey string
|
||||
Timeout time.Duration
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
type ACLConfig struct {
|
||||
PolicyPath string
|
||||
}
|
||||
|
||||
func LoadConfig(path string, isFile bool) error {
|
||||
if isFile {
|
||||
viper.SetConfigFile(path)
|
||||
} else {
|
||||
viper.SetConfigName("config")
|
||||
if path == "" {
|
||||
viper.AddConfigPath("/etc/headscale/")
|
||||
viper.AddConfigPath("$HOME/.headscale")
|
||||
viper.AddConfigPath(".")
|
||||
} else {
|
||||
// For testing
|
||||
viper.AddConfigPath(path)
|
||||
}
|
||||
}
|
||||
|
||||
viper.SetEnvPrefix("headscale")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", http01ChallengeType)
|
||||
viper.SetDefault("tls_client_auth_mode", "relaxed")
|
||||
|
||||
viper.SetDefault("log_level", "info")
|
||||
|
||||
viper.SetDefault("dns_config", nil)
|
||||
|
||||
viper.SetDefault("derp.server.enabled", false)
|
||||
viper.SetDefault("derp.server.stun.enabled", true)
|
||||
|
||||
viper.SetDefault("unix_socket", "/var/run/headscale.sock")
|
||||
viper.SetDefault("unix_socket_permission", "0o770")
|
||||
|
||||
viper.SetDefault("grpc_listen_addr", ":50443")
|
||||
viper.SetDefault("grpc_allow_insecure", false)
|
||||
|
||||
viper.SetDefault("cli.timeout", "5s")
|
||||
viper.SetDefault("cli.insecure", false)
|
||||
|
||||
viper.SetDefault("oidc.scope", []string{oidc.ScopeOpenID, "profile", "email"})
|
||||
viper.SetDefault("oidc.strip_email_domain", true)
|
||||
|
||||
viper.SetDefault("logtail.enabled", false)
|
||||
viper.SetDefault("randomize_client_port", false)
|
||||
|
||||
viper.SetDefault("ephemeral_node_inactivity_timeout", "120s")
|
||||
|
||||
viper.SetDefault("node_update_check_interval", "10s")
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to read configuration from disk")
|
||||
|
||||
return fmt.Errorf("fatal error reading config file: %w", err)
|
||||
}
|
||||
|
||||
// Collect any validation errors and return them all at once
|
||||
var errorText string
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") == tlsALPN01ChallengeType) &&
|
||||
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
|
||||
log.Warn().
|
||||
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != http01ChallengeType) &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") != tlsALPN01ChallengeType) {
|
||||
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") &&
|
||||
!strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
errorText += "Fatal config error: server_url must start with https:// or http://\n"
|
||||
}
|
||||
|
||||
_, authModeValid := LookupTLSClientAuthMode(
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
)
|
||||
|
||||
if !authModeValid {
|
||||
errorText += fmt.Sprintf(
|
||||
"Invalid tls_client_auth_mode supplied: %s. Accepted values: %s, %s, %s.",
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
DisabledClientAuth,
|
||||
RelaxedClientAuth,
|
||||
EnforcedClientAuth)
|
||||
}
|
||||
|
||||
// Minimum inactivity timeout is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
errorText += fmt.Sprintf(
|
||||
"Fatal config error: ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s",
|
||||
viper.GetString("ephemeral_node_inactivity_timeout"),
|
||||
minInactivityTimeout,
|
||||
)
|
||||
}
|
||||
|
||||
maxNodeUpdateCheckInterval, _ := time.ParseDuration("60s")
|
||||
if viper.GetDuration("node_update_check_interval") > maxNodeUpdateCheckInterval {
|
||||
errorText += fmt.Sprintf(
|
||||
"Fatal config error: node_update_check_interval (%s) is set too high, must be less than %s",
|
||||
viper.GetString("node_update_check_interval"),
|
||||
maxNodeUpdateCheckInterval,
|
||||
)
|
||||
}
|
||||
|
||||
if errorText != "" {
|
||||
//nolint
|
||||
return errors.New(strings.TrimSuffix(errorText, "\n"))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
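Because LoadConfig registers the "headscale" environment prefix and a "."-to-"_" key replacer, any key can be overridden from the environment. A minimal usage sketch, assuming a config file exists in one of the default search paths and using the import path from the test file above:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/juanfont/headscale"
	"github.com/spf13/viper"
)

func main() {
	// Environment variables take precedence over file values because
	// LoadConfig calls viper.AutomaticEnv() with the "headscale" prefix,
	// so the key log_level becomes HEADSCALE_LOG_LEVEL.
	os.Setenv("HEADSCALE_LOG_LEVEL", "debug")

	// Empty path means: search /etc/headscale, ~/.headscale and the cwd.
	if err := headscale.LoadConfig("", false); err != nil {
		log.Fatal(err)
	}

	fmt.Println("log_level:", viper.GetString("log_level")) // "debug"
}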
|
||||
|
||||
func GetTLSConfig() TLSConfig {
|
||||
tlsClientAuthMode, _ := LookupTLSClientAuthMode(
|
||||
viper.GetString("tls_client_auth_mode"),
|
||||
)
|
||||
|
||||
return TLSConfig{
|
||||
LetsEncrypt: LetsEncryptConfig{
|
||||
Hostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
Listen: viper.GetString("tls_letsencrypt_listen"),
|
||||
CacheDir: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_letsencrypt_cache_dir"),
|
||||
),
|
||||
ChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
|
||||
},
|
||||
CertPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_cert_path"),
|
||||
),
|
||||
KeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("tls_key_path"),
|
||||
),
|
||||
ClientAuthMode: tlsClientAuthMode,
|
||||
}
|
||||
}
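GetTLSConfig depends on LookupTLSClientAuthMode, which is defined elsewhere and not part of this diff. A plausible sketch, assuming the three documented modes (disabled/relaxed/enforced) are string constants that map onto crypto/tls client-auth types:

// Sketch only: the real LookupTLSClientAuthMode lives outside this diff.
// The mapping below is an assumption based on the mode descriptions in
// config-example.yaml, not a copy of the actual implementation.
func LookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
	switch mode {
	case DisabledClientAuth:
		// Client certificates are not requested at all.
		return tls.NoClientCert, true
	case RelaxedClientAuth:
		// A certificate is required but not verified.
		return tls.RequireAnyClientCert, true
	case EnforcedClientAuth:
		// A certificate is required and verified.
		return tls.RequireAndVerifyClientCert, true
	default:
		return tls.NoClientCert, false
	}
}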
|
||||
|
||||
func GetDERPConfig() DERPConfig {
|
||||
serverEnabled := viper.GetBool("derp.server.enabled")
|
||||
serverRegionID := viper.GetInt("derp.server.region_id")
|
||||
serverRegionCode := viper.GetString("derp.server.region_code")
|
||||
serverRegionName := viper.GetString("derp.server.region_name")
|
||||
stunAddr := viper.GetString("derp.server.stun_listen_addr")
|
||||
|
||||
if serverEnabled && stunAddr == "" {
|
||||
log.Fatal().
|
||||
Msg("derp.server.stun_listen_addr must be set if derp.server.enabled is true")
|
||||
}
|
||||
|
||||
urlStrs := viper.GetStringSlice("derp.urls")
|
||||
|
||||
urls := make([]url.URL, len(urlStrs))
|
||||
for index, urlStr := range urlStrs {
|
||||
urlAddr, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("url", urlStr).
|
||||
Err(err).
|
||||
Msg("Failed to parse url, ignoring...")
|
||||
}
|
||||
|
||||
urls[index] = *urlAddr
|
||||
}
|
||||
|
||||
paths := viper.GetStringSlice("derp.paths")
|
||||
|
||||
autoUpdate := viper.GetBool("derp.auto_update_enabled")
|
||||
updateFrequency := viper.GetDuration("derp.update_frequency")
|
||||
|
||||
return DERPConfig{
|
||||
ServerEnabled: serverEnabled,
|
||||
ServerRegionID: serverRegionID,
|
||||
ServerRegionCode: serverRegionCode,
|
||||
ServerRegionName: serverRegionName,
|
||||
STUNAddr: stunAddr,
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
func GetLogTailConfig() LogTailConfig {
|
||||
enabled := viper.GetBool("logtail.enabled")
|
||||
|
||||
return LogTailConfig{
|
||||
Enabled: enabled,
|
||||
}
|
||||
}
|
||||
|
||||
func GetACLConfig() ACLConfig {
|
||||
policyPath := viper.GetString("acl_policy_path")
|
||||
|
||||
return ACLConfig{
|
||||
PolicyPath: policyPath,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
if viper.IsSet("dns_config") {
|
||||
dnsConfig := &tailcfg.DNSConfig{}
|
||||
|
||||
if viper.IsSet("dns_config.nameservers") {
|
||||
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
|
||||
|
||||
nameservers := make([]netaddr.IP, len(nameserversStr))
|
||||
resolvers := make([]*dnstype.Resolver, len(nameserversStr))
|
||||
|
||||
for index, nameserverStr := range nameserversStr {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
|
||||
nameservers[index] = nameserver
|
||||
resolvers[index] = &dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
|
||||
dnsConfig.Nameservers = nameservers
|
||||
dnsConfig.Resolvers = resolvers
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.restricted_nameservers") {
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
|
||||
restrictedDNS := viper.GetStringMapStringSlice(
|
||||
"dns_config.restricted_nameservers",
|
||||
)
|
||||
for domain, restrictedNameservers := range restrictedDNS {
|
||||
restrictedResolvers := make(
|
||||
[]*dnstype.Resolver,
|
||||
len(restrictedNameservers),
|
||||
)
|
||||
for index, nameserverStr := range restrictedNameservers {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
restrictedResolvers[index] = &dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
dnsConfig.Routes[domain] = restrictedResolvers
|
||||
}
|
||||
} else {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.domains") {
|
||||
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.magic_dns") {
|
||||
magicDNS := viper.GetBool("dns_config.magic_dns")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Proxied = magicDNS
|
||||
} else if magicDNS {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
|
||||
}
|
||||
}
|
||||
|
||||
var baseDomain string
|
||||
if viper.IsSet("dns_config.base_domain") {
|
||||
baseDomain = viper.GetString("dns_config.base_domain")
|
||||
} else {
|
||||
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
|
||||
}
|
||||
|
||||
return dnsConfig, baseDomain
|
||||
}
|
||||
|
||||
return nil, ""
|
||||
}
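A small illustration of what GetDNSConfig produces for the example values above, setting the viper keys directly instead of loading a file (the import path is the one used by the tests earlier in this diff):

package main

import (
	"fmt"

	"github.com/juanfont/headscale"
	"github.com/spf13/viper"
)

func main() {
	// Mirror the dns_config section of config-example.yaml in memory.
	viper.Set("dns_config.nameservers", []string{"1.1.1.1"})
	viper.Set("dns_config.magic_dns", true)
	viper.Set("dns_config.base_domain", "example.com")

	dnsConfig, baseDomain := headscale.GetDNSConfig()

	fmt.Println(dnsConfig.Nameservers[0]) // 1.1.1.1
	fmt.Println(dnsConfig.Proxied)        // true, MagicDNS is enabled
	fmt.Println(baseDomain)               // example.com
}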
|
||||
|
||||
func GetHeadscaleConfig() (*Config, error) {
|
||||
dnsConfig, baseDomain := GetDNSConfig()
|
||||
derpConfig := GetDERPConfig()
|
||||
logConfig := GetLogTailConfig()
|
||||
randomizeClientPort := viper.GetBool("randomize_client_port")
|
||||
|
||||
configuredPrefixes := viper.GetStringSlice("ip_prefixes")
|
||||
parsedPrefixes := make([]netaddr.IPPrefix, 0, len(configuredPrefixes)+1)
|
||||
|
||||
logLevelStr := viper.GetString("log_level")
|
||||
logLevel, err := zerolog.ParseLevel(logLevelStr)
|
||||
if err != nil {
|
||||
logLevel = zerolog.DebugLevel
|
||||
}
|
||||
|
||||
legacyPrefixField := viper.GetString("ip_prefix")
|
||||
if len(legacyPrefixField) > 0 {
|
||||
log.
|
||||
Warn().
|
||||
Msgf(
|
||||
"%s, %s",
|
||||
"use of 'ip_prefix' for configuration is deprecated",
|
||||
"please see 'ip_prefixes' in the shipped example.",
|
||||
)
|
||||
legacyPrefix, err := netaddr.ParseIPPrefix(legacyPrefixField)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefix: %w", err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, legacyPrefix)
|
||||
}
|
||||
|
||||
for i, prefixInConfig := range configuredPrefixes {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixInConfig)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, prefix)
|
||||
}
|
||||
|
||||
prefixes := make([]netaddr.IPPrefix, 0, len(parsedPrefixes))
|
||||
{
|
||||
// dedup
|
||||
normalizedPrefixes := make(map[string]int, len(parsedPrefixes))
|
||||
for i, p := range parsedPrefixes {
|
||||
normalized, _ := p.Range().Prefix()
|
||||
normalizedPrefixes[normalized.String()] = i
|
||||
}
|
||||
|
||||
// convert back to list
|
||||
for _, i := range normalizedPrefixes {
|
||||
prefixes = append(prefixes, parsedPrefixes[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(prefixes) < 1 {
|
||||
prefixes = append(prefixes, netaddr.MustParseIPPrefix("100.64.0.0/10"))
|
||||
log.Warn().
|
||||
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
|
||||
}
|
||||
|
||||
return &Config{
|
||||
ServerURL: viper.GetString("server_url"),
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
MetricsAddr: viper.GetString("metrics_listen_addr"),
|
||||
GRPCAddr: viper.GetString("grpc_listen_addr"),
|
||||
GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"),
|
||||
DisableUpdateCheck: viper.GetBool("disable_check_updates"),
|
||||
LogLevel: logLevel,
|
||||
|
||||
IPPrefixes: prefixes,
|
||||
PrivateKeyPath: AbsolutePathFromConfigPath(
|
||||
viper.GetString("private_key_path"),
|
||||
),
|
||||
BaseDomain: baseDomain,
|
||||
|
||||
DERP: derpConfig,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration(
|
||||
"ephemeral_node_inactivity_timeout",
|
||||
),
|
||||
|
||||
NodeUpdateCheckInterval: viper.GetDuration(
|
||||
"node_update_check_interval",
|
||||
),
|
||||
|
||||
DBtype: viper.GetString("db_type"),
|
||||
DBpath: AbsolutePathFromConfigPath(viper.GetString("db_path")),
|
||||
DBhost: viper.GetString("db_host"),
|
||||
DBport: viper.GetInt("db_port"),
|
||||
DBname: viper.GetString("db_name"),
|
||||
DBuser: viper.GetString("db_user"),
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
|
||||
TLS: GetTLSConfig(),
|
||||
|
||||
DNSConfig: dnsConfig,
|
||||
|
||||
ACMEEmail: viper.GetString("acme_email"),
|
||||
ACMEURL: viper.GetString("acme_url"),
|
||||
|
||||
UnixSocket: viper.GetString("unix_socket"),
|
||||
UnixSocketPermission: GetFileMode("unix_socket_permission"),
|
||||
|
||||
OIDC: OIDCConfig{
|
||||
Issuer: viper.GetString("oidc.issuer"),
|
||||
ClientID: viper.GetString("oidc.client_id"),
|
||||
ClientSecret: viper.GetString("oidc.client_secret"),
|
||||
Scope: viper.GetStringSlice("oidc.scope"),
|
||||
ExtraParams: viper.GetStringMapString("oidc.extra_params"),
|
||||
AllowedDomains: viper.GetStringSlice("oidc.allowed_domains"),
|
||||
AllowedUsers: viper.GetStringSlice("oidc.allowed_users"),
|
||||
StripEmaildomain: viper.GetBool("oidc.strip_email_domain"),
|
||||
},
|
||||
|
||||
LogTail: logConfig,
|
||||
RandomizeClientPort: randomizeClientPort,
|
||||
|
||||
CLI: CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
Timeout: viper.GetDuration("cli.timeout"),
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
},
|
||||
|
||||
ACL: GetACLConfig(),
|
||||
}, nil
|
||||
}
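A minimal sketch of how a caller might obtain the typed Config, assuming a valid configuration file in one of the default search paths:

package main

import (
	"fmt"
	"log"

	"github.com/juanfont/headscale"
)

func main() {
	// Read and validate the on-disk configuration first; GetHeadscaleConfig
	// only translates the already loaded viper state into the typed Config.
	if err := headscale.LoadConfig("", false); err != nil {
		log.Fatal(err)
	}

	cfg, err := headscale.GetHeadscaleConfig()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("listen addr:", cfg.Addr)
	fmt.Println("ip prefixes:", cfg.IPPrefixes)
	fmt.Println("embedded DERP enabled:", cfg.DERP.ServerEnabled)
}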
|
db.go (200 changed lines)
@@ -1,12 +1,20 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/driver/sqlite"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -28,26 +36,104 @@ func (h *Headscale) initDB() error {
|
||||
h.db = db
|
||||
|
||||
if h.dbType == Postgres {
|
||||
db.Exec("create extension if not exists \"uuid-ossp\";")
|
||||
db.Exec(`create extension if not exists "uuid-ossp";`)
|
||||
}
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "ip_address", "ip_addresses")
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "name", "hostname")
|
||||
|
||||
// GivenName is used as the primary source of DNS names; make sure
|
||||
// the field is populated and normalized if it was not when the
|
||||
// machine was registered.
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "nickname", "given_name")
|
||||
|
||||
// If the Machine table has a column for registered,
|
||||
// find all occurrences of "false" and drop them. Then
|
||||
// remove the column.
|
||||
if db.Migrator().HasColumn(&Machine{}, "registered") {
|
||||
log.Info().
|
||||
Msg(`Database has legacy "registered" column in machine, removing...`)
|
||||
|
||||
machines := Machines{}
|
||||
if err := h.db.Not("registered").Find(&machines).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for _, machine := range machines {
|
||||
log.Info().
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Deleting unregistered machine")
|
||||
if err := h.db.Delete(&Machine{}, machine.ID).Error; err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Error deleting unregistered machine")
|
||||
}
|
||||
}
|
||||
|
||||
err := db.Migrator().DropColumn(&Machine{}, "registered")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error dropping registered column")
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "given_name") {
|
||||
machines := Machines{}
|
||||
if err := h.db.Find(&machines).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for item, machine := range machines {
|
||||
if machine.GivenName == "" {
|
||||
normalizedHostname, err := NormalizeToFQDNRules(
|
||||
machine.Hostname,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to normalize machine hostname in DB migration")
|
||||
}
|
||||
|
||||
err = h.RenameMachine(&machines[item], normalizedHostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to save normalized machine name in DB migration")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&KV{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Namespace{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&PreAuthKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&SharedMachine{})
|
||||
_ = db.Migrator().DropTable("shared_machines")
|
||||
|
||||
err = db.AutoMigrate(&APIKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -70,10 +156,24 @@ func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
|
||||
switch h.dbType {
|
||||
case Sqlite:
|
||||
db, err = gorm.Open(sqlite.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
})
|
||||
db, err = gorm.Open(
|
||||
sqlite.Open(h.dbString+"?_synchronous=1&_journal_mode=WAL"),
|
||||
&gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
},
|
||||
)
|
||||
|
||||
db.Exec("PRAGMA foreign_keys=ON")
|
||||
|
||||
// The pure Go SQLite library does not handle locking in
|
||||
// the same way as the C-based one and we can't use the gorm
|
||||
// connection pool as of 2022/02/23.
|
||||
sqlDB, _ := db.DB()
|
||||
sqlDB.SetMaxIdleConns(1)
|
||||
sqlDB.SetMaxOpenConns(1)
|
||||
sqlDB.SetConnMaxIdleTime(time.Hour)
|
||||
|
||||
case Postgres:
|
||||
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
@@ -114,7 +214,91 @@ func (h *Headscale) setValue(key string, value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
h.db.Create(keyValue)
|
||||
if err := h.db.Create(keyValue).Error; err != nil {
|
||||
return fmt.Errorf("failed to create key value pair in the database: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) pingDB() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||
defer cancel()
|
||||
db, err := h.db.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.PingContext(ctx)
|
||||
}
|
||||
|
||||
// This is a "wrapper" type around tailscales
|
||||
// Hostinfo to allow us to add database "serialization"
|
||||
// methods. This allows us to use a typed values throughout
|
||||
// the code and not have to marshal/unmarshal and error
|
||||
// check all over the code.
|
||||
type HostInfo tailcfg.Hostinfo
|
||||
|
||||
func (hi *HostInfo) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(value, hi)
|
||||
|
||||
case string:
|
||||
return json.Unmarshal([]byte(value), hi)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", errMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (hi HostInfo) Value() (driver.Value, error) {
|
||||
bytes, err := json.Marshal(hi)
|
||||
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
type IPPrefixes []netaddr.IPPrefix
|
||||
|
||||
func (i *IPPrefixes) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(value, i)
|
||||
|
||||
case string:
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", errMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (i IPPrefixes) Value() (driver.Value, error) {
|
||||
bytes, err := json.Marshal(i)
|
||||
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
type StringList []string
|
||||
|
||||
func (i *StringList) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(value, i)
|
||||
|
||||
case string:
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", errMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (i StringList) Value() (driver.Value, error) {
|
||||
bytes, err := json.Marshal(i)
|
||||
|
||||
return string(bytes), err
|
||||
}
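The Scan/Value pairs above are what let gorm persist these types as JSON text columns. A hypothetical example of the pattern, assuming it lives in the same package with gorm and netaddr already imported; the Gadget model is invented for illustration and is not part of this diff:

// Illustration only: "Gadget" is an invented model, not part of this diff.
// Any field whose type implements sql.Scanner and driver.Valuer, as
// HostInfo, IPPrefixes and StringList do above, is stored by gorm through
// those methods, here as a JSON text column.
type Gadget struct {
	ID        uint
	Tags      StringList
	Addresses IPPrefixes
}

func saveGadget(db *gorm.DB) error {
	gadget := Gadget{
		Tags:      StringList{"tag:prod", "tag:ssh"},
		Addresses: IPPrefixes{netaddr.MustParseIPPrefix("100.64.0.0/10")},
	}

	// gorm calls IPPrefixes.Value()/StringList.Value() on INSERT and the
	// corresponding Scan() methods when the row is read back.
	return db.Create(&gadget).Error
}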
|
||||
|
@@ -12,4 +12,4 @@ regions:
|
||||
ipv6: "2604:a880:400:d1::828:b001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
derpport: 0
|
||||
|
derp.go (12 changed lines)
@@ -148,17 +148,11 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
|
||||
case <-ticker.C:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Failed to fetch namespaces")
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
h.DERPMap.Regions[h.DERPServer.region.RegionID] = &h.DERPServer.region
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
}
|
||||
h.setLastStateChangeToNow()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
derp_server.go (new file, 287 lines)
@@ -0,0 +1,287 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/net/stun"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
// fastStartHeader is the header (with value "1") that signals to the HTTP
|
||||
// server that the DERP HTTP client does not want the HTTP 101 response
|
||||
// headers and it will begin writing & reading the DERP protocol immediately
|
||||
// following its HTTP request.
|
||||
const fastStartHeader = "Derp-Fast-Start"
|
||||
|
||||
type DERPServer struct {
|
||||
tailscaleDERP *derp.Server
|
||||
region tailcfg.DERPRegion
|
||||
}
|
||||
|
||||
func (h *Headscale) NewDERPServer() (*DERPServer, error) {
|
||||
log.Trace().Caller().Msg("Creating new embedded DERP server")
|
||||
server := derp.NewServer(key.NodePrivate(*h.privateKey), log.Info().Msgf)
|
||||
region, err := h.generateRegionLocalDERP()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DERPServer{server, region}, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateRegionLocalDERP() (tailcfg.DERPRegion, error) {
|
||||
serverURL, err := url.Parse(h.cfg.ServerURL)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
var host string
|
||||
var port int
|
||||
host, portStr, err := net.SplitHostPort(serverURL.Host)
|
||||
if err != nil {
|
||||
if serverURL.Scheme == "https" {
|
||||
host = serverURL.Host
|
||||
port = 443
|
||||
} else {
|
||||
host = serverURL.Host
|
||||
port = 80
|
||||
}
|
||||
} else {
|
||||
port, err = strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
}
|
||||
|
||||
localDERPregion := tailcfg.DERPRegion{
|
||||
RegionID: h.cfg.DERP.ServerRegionID,
|
||||
RegionCode: h.cfg.DERP.ServerRegionCode,
|
||||
RegionName: h.cfg.DERP.ServerRegionName,
|
||||
Avoid: false,
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{
|
||||
Name: fmt.Sprintf("%d", h.cfg.DERP.ServerRegionID),
|
||||
RegionID: h.cfg.DERP.ServerRegionID,
|
||||
HostName: host,
|
||||
DERPPort: port,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, portSTUNStr, err := net.SplitHostPort(h.cfg.DERP.STUNAddr)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
portSTUN, err := strconv.Atoi(portSTUNStr)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
localDERPregion.Nodes[0].STUNPort = portSTUN
|
||||
|
||||
log.Info().Caller().Msgf("DERP region: %+v", localDERPregion)
|
||||
|
||||
return localDERPregion, nil
|
||||
}
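As a concrete illustration of generateRegionLocalDERP, with server_url https://myheadscale.example.com:443, region_id 999, region_code "headscale", region_name "Headscale Embedded DERP" and stun_listen_addr 0.0.0.0:3478 (the values from config-example.yaml), the generated region would look roughly like this; the variable name is invented for the sketch:

// Expected shape of the result for the example configuration; the field
// values follow directly from the parsing logic above.
var exampleRegion = tailcfg.DERPRegion{
	RegionID:   999,
	RegionCode: "headscale",
	RegionName: "Headscale Embedded DERP",
	Nodes: []*tailcfg.DERPNode{
		{
			Name:     "999",
			RegionID: 999,
			HostName: "myheadscale.example.com",
			DERPPort: 443,  // port taken from server_url
			STUNPort: 3478, // port taken from stun_listen_addr
		},
	},
}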
|
||||
|
||||
func (h *Headscale) DERPHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
log.Trace().Caller().Msgf("/derp request from %v", req.RemoteAddr)
|
||||
up := strings.ToLower(req.Header.Get("Upgrade"))
|
||||
if up != "websocket" && up != "derp" {
|
||||
if up != "" {
|
||||
log.Warn().Caller().Msgf("Weird websockets connection upgrade: %q", up)
|
||||
}
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusUpgradeRequired)
|
||||
_, err := writer.Write([]byte("DERP requires connection upgrade"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
fastStart := req.Header.Get(fastStartHeader) == "1"
|
||||
|
||||
hijacker, ok := writer.(http.Hijacker)
|
||||
if !ok {
|
||||
log.Error().Caller().Msg("DERP requires Hijacker interface from Gin")
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err := writer.Write([]byte("HTTP does not support general TCP support"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
netConn, conn, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msgf("Hijack failed")
|
||||
writer.Header().Set("Content-Type", "text/plain")
|
||||
writer.WriteHeader(http.StatusInternalServerError)
|
||||
_, err = writer.Write([]byte("HTTP does not support general TCP support"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
log.Trace().Caller().Msgf("Hijacked connection from %v", req.RemoteAddr)
|
||||
|
||||
if !fastStart {
|
||||
pubKey := h.privateKey.Public()
|
||||
pubKeyStr := pubKey.UntypedHexString() // nolint
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
|
||||
"Upgrade: DERP\r\n"+
|
||||
"Connection: Upgrade\r\n"+
|
||||
"Derp-Version: %v\r\n"+
|
||||
"Derp-Public-Key: %s\r\n\r\n",
|
||||
derp.ProtocolVersion,
|
||||
pubKeyStr)
|
||||
}
|
||||
|
||||
h.DERPServer.tailscaleDERP.Accept(netConn, conn, netConn.RemoteAddr().String())
|
||||
}
|
||||
|
||||
// DERPProbeHandler is the endpoint that js/wasm clients hit to measure
|
||||
// DERP latency, since they can't do UDP STUN queries.
|
||||
func (h *Headscale) DERPProbeHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
switch req.Method {
|
||||
case "HEAD", "GET":
|
||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
writer.WriteHeader(http.StatusMethodNotAllowed)
|
||||
_, err := writer.Write([]byte("bogus probe method"))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint.
// Described in https://github.com/tailscale/tailscale/issues/1405,
// this endpoint provides a way to help a client when it fails to start up
// because its DNS is broken.
// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
// They have a cache, but it is not clear if that is really necessary at Headscale, uh, scale.
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
|
||||
func (h *Headscale) DERPBootstrapDNSHandler(
|
||||
writer http.ResponseWriter,
|
||||
req *http.Request,
|
||||
) {
|
||||
dnsEntries := make(map[string][]net.IP)
|
||||
|
||||
resolvCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
var resolver net.Resolver
|
||||
for _, region := range h.DERPMap.Regions {
|
||||
for _, node := range region.Nodes { // we don't care if we override some nodes
|
||||
addrs, err := resolver.LookupIP(resolvCtx, "ip", node.HostName)
|
||||
if err != nil {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msgf("bootstrap DNS lookup failed %q", node.HostName)
|
||||
|
||||
continue
|
||||
}
|
||||
dnsEntries[node.HostName] = addrs
|
||||
}
|
||||
}
|
||||
writer.Header().Set("Content-Type", "application/json")
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
err := json.NewEncoder(writer).Encode(dnsEntries)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Failed to write response")
|
||||
}
|
||||
}
|
||||
|
||||
// ServeSTUN starts a STUN server on the configured addr.
|
||||
func (h *Headscale) ServeSTUN() {
|
||||
packetConn, err := net.ListenPacket("udp", h.cfg.DERP.STUNAddr)
|
||||
if err != nil {
|
||||
log.Fatal().Msgf("failed to open STUN listener: %v", err)
|
||||
}
|
||||
log.Info().Msgf("STUN server started at %s", packetConn.LocalAddr())
|
||||
|
||||
udpConn, ok := packetConn.(*net.UDPConn)
|
||||
if !ok {
|
||||
log.Fatal().Msg("STUN listener is not a UDP listener")
|
||||
}
|
||||
serverSTUNListener(context.Background(), udpConn)
|
||||
}
|
||||
|
||||
func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
|
||||
var buf [64 << 10]byte
|
||||
var (
|
||||
bytesRead int
|
||||
udpAddr *net.UDPAddr
|
||||
err error
|
||||
)
|
||||
for {
|
||||
bytesRead, udpAddr, err = packetConn.ReadFromUDP(buf[:])
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
log.Error().Caller().Err(err).Msgf("STUN ReadFrom")
|
||||
time.Sleep(time.Second)
|
||||
|
||||
continue
|
||||
}
|
||||
log.Trace().Caller().Msgf("STUN request from %v", udpAddr)
|
||||
pkt := buf[:bytesRead]
|
||||
if !stun.Is(pkt) {
|
||||
log.Trace().Caller().Msgf("UDP packet is not STUN")
|
||||
|
||||
continue
|
||||
}
|
||||
txid, err := stun.ParseBindingRequest(pkt)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("STUN parse error")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
res := stun.Response(txid, udpAddr.IP, uint16(udpAddr.Port))
|
||||
_, err = packetConn.WriteTo(res, udpAddr)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("Issue writing to UDP")
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
dns.go (103 changed lines)
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/set"
|
||||
mapset "github.com/deckarep/golang-set/v2"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/dnsname"
|
||||
@@ -14,6 +14,11 @@ const (
|
||||
ByteSize = 8
|
||||
)
|
||||
|
||||
const (
|
||||
ipv4AddressLength = 32
|
||||
ipv6AddressLength = 128
|
||||
)
|
||||
|
||||
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
|
||||
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS
|
||||
// server (listening on 100.100.100.100 udp/53) should be used for.
|
||||
@@ -34,14 +39,33 @@ const (
|
||||
|
||||
// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).
|
||||
// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.
|
||||
func generateMagicDNSRootDomains(
|
||||
ipPrefix netaddr.IPPrefix,
|
||||
) []dnsname.FQDN {
|
||||
// TODO(juanfont): we are not handing out IPv6 addresses yet
|
||||
// and in fact this is Tailscale.com's range (note the fd7a:115c:a1e0: range in the fc00::/7 network)
|
||||
ipv6base := dnsname.FQDN("0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.")
|
||||
fqdns := []dnsname.FQDN{ipv6base}
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes))
|
||||
for _, ipPrefix := range ipPrefixes {
|
||||
var generateDNSRoot func(netaddr.IPPrefix) []dnsname.FQDN
|
||||
switch ipPrefix.IP().BitLen() {
|
||||
case ipv4AddressLength:
|
||||
generateDNSRoot = generateIPv4DNSRootDomain
|
||||
|
||||
case ipv6AddressLength:
|
||||
generateDNSRoot = generateIPv6DNSRootDomain
|
||||
|
||||
default:
|
||||
panic(
|
||||
fmt.Sprintf(
|
||||
"unsupported IP version with address length %d",
|
||||
ipPrefix.IP().BitLen(),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
fqdns = append(fqdns, generateDNSRoot(ipPrefix)...)
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
// Conversion to the std lib net.IPnet, a bit easier to operate
|
||||
netRange := ipPrefix.IPNet()
|
||||
maskBits, _ := netRange.Mask.Size()
|
||||
@@ -65,6 +89,7 @@ func generateMagicDNSRootDomains(
|
||||
rdnsSlice = append(rdnsSlice, "in-addr.arpa.")
|
||||
rdnsBase := strings.Join(rdnsSlice, ".")
|
||||
|
||||
fqdns := make([]dnsname.FQDN, 0, max-min+1)
|
||||
for i := min; i <= max; i++ {
|
||||
fqdn, err := dnsname.ToFQDN(fmt.Sprintf("%d.%s", i, rdnsBase))
|
||||
if err != nil {
|
||||
@@ -76,6 +101,56 @@ func generateMagicDNSRootDomains(
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv6DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
const nibbleLen = 4
|
||||
|
||||
maskBits, _ := ipPrefix.IPNet().Mask.Size()
|
||||
expanded := ipPrefix.IP().StringExpanded()
|
||||
nibbleStr := strings.Map(func(r rune) rune {
|
||||
if r == ':' {
|
||||
return -1
|
||||
}
|
||||
|
||||
return r
|
||||
}, expanded)
|
||||
|
||||
// TODO?: that does not look like the most efficient implementation,
|
||||
// but the inputs are not so long as to cause problems,
|
||||
// and from what I can see, the generateMagicDNSRootDomains
|
||||
// function is called only once over the lifetime of a server process.
|
||||
prefixConstantParts := []string{}
|
||||
for i := 0; i < maskBits/nibbleLen; i++ {
|
||||
prefixConstantParts = append(
|
||||
[]string{string(nibbleStr[i])},
|
||||
prefixConstantParts...)
|
||||
}
|
||||
|
||||
makeDomain := func(variablePrefix ...string) (dnsname.FQDN, error) {
|
||||
prefix := strings.Join(append(variablePrefix, prefixConstantParts...), ".")
|
||||
|
||||
return dnsname.ToFQDN(fmt.Sprintf("%s.ip6.arpa", prefix))
|
||||
}
|
||||
|
||||
var fqdns []dnsname.FQDN
|
||||
if maskBits%4 == 0 {
|
||||
dom, _ := makeDomain()
|
||||
fqdns = append(fqdns, dom)
|
||||
} else {
|
||||
domCount := 1 << (maskBits % nibbleLen)
|
||||
fqdns = make([]dnsname.FQDN, 0, domCount)
|
||||
for i := 0; i < domCount; i++ {
|
||||
varNibble := fmt.Sprintf("%x", i)
|
||||
dom, err := makeDomain(varNibble)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fqdns = append(fqdns, dom)
|
||||
}
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
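To make the reverse-DNS generation concrete, this sketch (variable name invented) expands the two default ip_prefixes from the example config; the expected roots match the dns_test.go cases further down in this diff:

// 100.64.0.0/10 leaves 6 wildcard bits in the second octet, so the IPv4
// roots are 64.100.in-addr.arpa. through 127.100.in-addr.arpa.
// fd7a:115c:a1e0::/48 covers exactly 12 nibbles, so the single IPv6 root
// is 0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.
var exampleDomains = generateMagicDNSRootDomains([]netaddr.IPPrefix{
	netaddr.MustParseIPPrefix("100.64.0.0/10"),
	netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48"),
})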
|
||||
|
||||
func getMapResponseDNSConfig(
|
||||
dnsConfigOrig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
@@ -88,16 +163,20 @@ func getMapResponseDNSConfig(
|
||||
dnsConfig = dnsConfigOrig.Clone()
|
||||
dnsConfig.Domains = append(
|
||||
dnsConfig.Domains,
|
||||
fmt.Sprintf("%s.%s", machine.Namespace.Name, baseDomain),
|
||||
fmt.Sprintf(
|
||||
"%s.%s",
|
||||
machine.Namespace.Name,
|
||||
baseDomain,
|
||||
),
|
||||
)
|
||||
|
||||
namespaceSet := set.New(set.ThreadSafe)
|
||||
namespaceSet := mapset.NewSet[Namespace]()
|
||||
namespaceSet.Add(machine.Namespace)
|
||||
for _, p := range peers {
|
||||
namespaceSet.Add(p.Namespace)
|
||||
}
|
||||
for _, namespace := range namespaceSet.List() {
|
||||
dnsRoute := fmt.Sprintf("%s.%s", namespace.(Namespace).Name, baseDomain)
|
||||
for _, namespace := range namespaceSet.ToSlice() {
|
||||
dnsRoute := fmt.Sprintf("%v.%v", namespace.Name, baseDomain)
|
||||
dnsConfig.Routes[dnsRoute] = nil
|
||||
}
|
||||
} else {
|
||||
|
dns_test.go (117 changed lines)
@@ -10,8 +10,10 @@ import (
|
||||
)
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
prefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
|
||||
domains := generateMagicDNSRootDomains(prefix)
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("100.64.0.0/10"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
@@ -45,8 +47,10 @@ func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
prefix := netaddr.MustParseIPPrefix("172.16.0.0/16")
|
||||
domains := generateMagicDNSRootDomains(prefix)
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("172.16.0.0/16"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
@@ -69,6 +73,44 @@ func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
// Happens when netmask is a multiple of 4 bits (sounds likely).
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
c.Assert(len(domains), check.Equals, 1)
|
||||
c.Assert(
|
||||
domains[0].WithTrailingDot(),
|
||||
check.Equals,
|
||||
"0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.",
|
||||
)
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6SingleMultiple(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/50"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
yieldsRoot := func(dom string) bool {
|
||||
for _, candidate := range domains {
|
||||
if candidate.WithTrailingDot() == dom {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
c.Assert(len(domains), check.Equals, 4)
|
||||
c.Assert(yieldsRoot("0.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("1.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("2.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("3.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
namespaceShared1, err := app.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -119,17 +161,16 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
Hostname: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.1",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
@@ -137,17 +178,16 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
Hostname: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.2",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
@@ -155,17 +195,16 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
Hostname: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.3",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
@@ -173,22 +212,18 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
Hostname: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.4",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
err = app.AddSharedMachineToNamespace(machineInShared2, namespaceShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Routes: make(map[string][]*dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: true,
|
||||
}
|
||||
@@ -203,7 +238,8 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
peersOfMachineInShared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 2)
|
||||
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 3)
|
||||
|
||||
domainRouteShared1 := fmt.Sprintf("%s.%s", namespaceShared1.Name, baseDomain)
|
||||
_, ok := dnsConfig.Routes[domainRouteShared1]
|
||||
@@ -215,7 +251,7 @@ func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
|
||||
domainRouteShared3 := fmt.Sprintf("%s.%s", namespaceShared3.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared3]
|
||||
c.Assert(ok, check.Equals, false)
|
||||
c.Assert(ok, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
@@ -268,17 +304,16 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
Hostname: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.1",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
@@ -286,17 +321,16 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
Hostname: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.2",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
@@ -304,17 +338,16 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
Hostname: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.3",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
@@ -322,22 +355,18 @@ func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
Hostname: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddress: "100.64.0.4",
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
err = app.AddSharedMachineToNamespace(machineInShared2, namespaceShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Routes: make(map[string][]*dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: false,
|
||||
}
|
||||
|
@@ -1,74 +0,0 @@
|
||||
# Configuration reference
|
||||
|
||||
Headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
|
||||
|
||||
- `/etc/headscale`
|
||||
- `~/.headscale`
|
||||
- current working directory
|
||||
|
||||
```yaml
|
||||
server_url: http://headscale.mydomain.net
|
||||
listen_addr: 0.0.0.0:8080
|
||||
ip_prefix: 100.64.0.0/10
|
||||
disable_check_updates: false
|
||||
```
|
||||
|
||||
`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on. `ip_prefix` is the IP prefix (range) from which IP addresses for nodes will be allocated; the default is 100.64.0.0/10, but other prefixes such as 192.168.4.0/24 or 10.0.0.0/8 can be used. `disable_check_updates` disables the automatic check for updates.
|
||||
|
||||
```yaml
|
||||
log_level: debug
|
||||
```
|
||||
|
||||
`log_level` sets the log level for Headscale. It defaults to `debug`; the available levels are `trace`, `debug`, `info`, `warn` and `error`.
|
||||
|
||||
```yaml
|
||||
derp_map_path: derp.yaml
|
||||
```
|
||||
|
||||
`derp_map_path` is the path to the [DERP](https://pkg.go.dev/tailscale.com/derp) map file. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```yaml
|
||||
ephemeral_node_inactivity_timeout: "30m"
|
||||
```
|
||||
|
||||
`ephemeral_node_inactivity_timeout` is the timeout after which inactive ephemeral node records will be deleted from the database. The default is 30 minutes. This value must be higher than 65 seconds (the keepalive timeout for the HTTP long poll is 60 seconds, plus a few seconds to avoid race conditions).
|
||||
|
||||
PostgreSQL
|
||||
|
||||
```yaml
|
||||
db_host: localhost
|
||||
db_port: 5432
|
||||
db_name: headscale
|
||||
db_user: foo
|
||||
db_pass: bar
|
||||
```
|
||||
|
||||
SQLite
|
||||
|
||||
```yaml
|
||||
db_type: sqlite3
|
||||
db_path: db.sqlite
|
||||
```
|
||||
|
||||
The fields starting with `db_` are used for the DB connection information.
|
||||
|
||||
### TLS configuration
|
||||
|
||||
Please check [`TLS.md`](TLS.md).
|
||||
|
||||
### DNS configuration
|
||||
|
||||
Please refer to [`DNS.md`](DNS.md).
|
||||
|
||||
### Policy ACLs
|
||||
|
||||
Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.
|
||||
|
||||
For instance, instead of referring to users when defining groups you must
|
||||
use namespaces (which are the equivalent of users/logins in Tailscale.com).
|
||||
|
||||
Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.
|
||||
|
||||
### Apple devices
|
||||
|
||||
An endpoint with information on how to connect your Apple devices (currently macOS only) is available at `/apple` on your running instance.
|
docs/DNS.md
@@ -1,37 +0,0 @@
|
||||
# DNS in headscale
|
||||
|
||||
headscale supports Tailscale's DNS configuration and MagicDNS. Please have a look at their KB to better understand what this means:
|
||||
|
||||
- https://tailscale.com/kb/1054/dns/
|
||||
- https://tailscale.com/kb/1081/magicdns/
|
||||
- https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
|
||||
|
||||
Long story short, you can define the DNS servers you want to use in your tailnets, activate MagicDNS (so you don't have to remember the IP addresses of your nodes), define search domains, as well as predefined hosts. headscale will inject those settings into your nodes.
|
||||
|
||||
## Configuration reference
|
||||
|
||||
The setup is done via the `config.yaml` file, under the `dns_config` key.
|
||||
|
||||
```yaml
|
||||
server_url: http://127.0.0.1:8001
|
||||
listen_addr: 0.0.0.0:8001
|
||||
dns_config:
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
- 8.8.8.8
|
||||
restricted_nameservers:
|
||||
foo.bar.com:
|
||||
- 1.1.1.1
|
||||
darp.headscale.net:
|
||||
- 1.1.1.1
|
||||
- 8.8.8.8
|
||||
domains: []
|
||||
magic_dns: true
|
||||
base_domain: example.com
|
||||
```
|
||||
|
||||
- `nameservers`: The list of DNS servers to use.
|
||||
- `domains`: Search domains to inject.
|
||||
- `magic_dns`: Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/). Only works if at least one nameserver is defined.
|
||||
- `base_domain`: Defines the base domain used to create the hostnames for MagicDNS. `base_domain` must be an FQDN, without the trailing dot. The FQDN of the hosts will be `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
|
||||
- `restricted_nameservers`: Split DNS (see https://tailscale.com/kb/1054/dns/): a map of search domains to the DNS servers to query for each one.
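
As a rough sanity check (the hostname below is purely illustrative, following the `hostname.namespace.base_domain` pattern described above), MagicDNS can be verified from any joined node:

```shell
# Confirm the node is connected and has received the injected DNS settings
tailscale status

# Resolve a peer by its MagicDNS name
ping myhost.mynamespace.example.com
```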
|
@@ -1,3 +0,0 @@
|
||||
# Glossary
|
||||
|
||||
- Namespace: Collection of Tailscale nodes that can see each other. In Tailscale.com this is called Tailnet.
|
docs/README.md (new file)
@@ -0,0 +1,53 @@
|
||||
# headscale documentation
|
||||
|
||||
This page contains the official and community contributed documentation for `headscale`.
|
||||
|
||||
If you are having trouble following the documentation or get unexpected results,
|
||||
please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Issue.
|
||||
|
||||
## Official documentation
|
||||
|
||||
### How-to
|
||||
|
||||
- [Running headscale on Linux](running-headscale-linux.md)
|
||||
- [Control headscale remotely](remote-cli.md)
|
||||
- [Using a Windows client with headscale](windows-client.md)
|
||||
|
||||
### References
|
||||
|
||||
- [Configuration](../config-example.yaml)
|
||||
- [Glossary](glossary.md)
|
||||
- [TLS](tls.md)
|
||||
|
||||
## Community documentation
|
||||
|
||||
Community documentation is not actively maintained by the headscale authors and is
|
||||
written by community members. It is _not_ verified by `headscale` developers.
|
||||
|
||||
**It might be outdated and it might be missing necessary steps**.
|
||||
|
||||
- [Running headscale in a container](running-headscale-container.md)
|
||||
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
|
||||
|
||||
## Misc
|
||||
|
||||
### Policy ACLs
|
||||
|
||||
Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.
|
||||
|
||||
For instance, instead of referring to users when defining groups you must
|
||||
use namespaces (which are the equivalent of users/logins in Tailscale.com).
|
||||
|
||||
Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.
|
||||
|
||||
When using ACLs, the namespace borders are no longer applied. All machines,
|
||||
whichever namespace they belong to, can communicate with other hosts as
|
||||
long as the ACLs permit this exchange.
|
||||
|
||||
The [ACLs](acls.md) document should help you understand a fictional case of setting
|
||||
up ACLs in a small company. All concepts presented in this document could be
|
||||
applied outside of business-oriented usage.
|
||||
|
||||
### Apple devices
|
||||
|
||||
An endpoint with information on how to connect your Apple devices (currently macOS only) is available at `/apple` on your running instance.
|
docs/Running.md
@@ -1,191 +0,0 @@
|
||||
# Running headscale
|
||||
|
||||
## Server configuration
|
||||
|
||||
1. Download the headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your $PATH or use the docker container
|
||||
|
||||
```shell
|
||||
docker pull headscale/headscale:x.x.x
|
||||
```
|
||||
|
||||
<!--
|
||||
or
|
||||
```shell
|
||||
docker pull ghcr.io/juanfont/headscale:x.x.x
|
||||
``` -->
|
||||
|
||||
2. When running headscale in a docker container, prepare a directory to hold all configuration
|
||||
|
||||
```shell
|
||||
mkdir config
|
||||
```
|
||||
|
||||
3. Get yourself a DB
|
||||
|
||||
a) Get a Postgres DB running in Docker:
|
||||
|
||||
```shell
|
||||
docker run --name headscale \
|
||||
-e POSTGRES_DB=headscale \
|
||||
-e POSTGRES_USER=foo \
|
||||
-e POSTGRES_PASSWORD=bar \
|
||||
-p 5432:5432 \
|
||||
-d postgres
|
||||
```
|
||||
|
||||
or b) Prepare a SQLite DB file:
|
||||
|
||||
```shell
|
||||
touch config/db.sqlite
|
||||
```
|
||||
|
||||
4. Create a headscale configuration, and a DERP map file. Refer to [tailscale sample](https://raw.githubusercontent.com/tailscale/tailscale/main/net/dnsfallback/dns-fallback-servers.json) for more guidance.
|
||||
|
||||
```shell
|
||||
cp config.yaml.[sqlite|postgres].example config/config.yaml
|
||||
|
||||
cp derp-example.yaml config/derp.yaml
|
||||
```
|
||||
|
||||
5. Create a namespace
|
||||
|
||||
```shell
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
or Docker:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
-v $(pwd)/config:/etc/headscale/ \
|
||||
-p 127.0.0.1:8080:8080 \
|
||||
headscale/headscale:x.x.x \
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
or if your server is already running in Docker:
|
||||
|
||||
```shell
|
||||
docker exec <container_name> \
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
6. Run the server
|
||||
|
||||
```shell
|
||||
headscale serve
|
||||
```
|
||||
|
||||
or Docker:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
-v $(pwd)/config:/etc/headscale/ \
|
||||
-p 127.0.0.1:8080:8080 \
|
||||
headscale/headscale:x.x.x \
|
||||
headscale serve
|
||||
```
|
||||
|
||||
## Nodes configuration
|
||||
|
||||
If you have used tailscale.com on your nodes before, make sure you clear the tailscaled data folder
|
||||
|
||||
```shell
|
||||
systemctl stop tailscaled
|
||||
rm -fr /var/lib/tailscale
|
||||
systemctl start tailscaled
|
||||
```
|
||||
|
||||
### Adding node based on MACHINEKEY
|
||||
|
||||
1. Add your first machine
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
2. Navigate to the URL returned by `tailscale up`, where you'll find your machine key.
|
||||
|
||||
3. In the server, register your machine to a namespace with the CLI
|
||||
|
||||
```shell
|
||||
headscale -n myfirstnamespace nodes register -k YOURMACHINEKEY
|
||||
```
|
||||
|
||||
or Docker:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
-v $(pwd)/config:/etc/headscale/ \
|
||||
headscale/headscale:x.x.x \
|
||||
headscale -n myfirstnamespace nodes register -k YOURMACHINEKEY
|
||||
```
|
||||
|
||||
or if your server is already running in Docker:
|
||||
|
||||
```shell
|
||||
docker exec <container_name> \
|
||||
headscale -n myfirstnamespace nodes register -k YOURMACHINEKEY
|
||||
```
|
||||
|
||||
### Alternative: adding node with AUTHKEY
|
||||
|
||||
1. Create an authkey
|
||||
|
||||
```shell
|
||||
headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
or Docker:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
-v $(pwd)/config:/etc/headscale/ \
|
||||
headscale/headscale:x.x.x \
|
||||
headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
or if your server is already running in Docker:
|
||||
|
||||
```shell
|
||||
docker exec <container_name> \
|
||||
headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
2. Use the authkey on your node to register it:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL --authkey YOURAUTHKEY
|
||||
```
|
||||
|
||||
If you create an authkey with the `--ephemeral` flag, that key will create ephemeral nodes. This implies that `--reusable` is true.
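
For example, such a key could be created like this (using the namespace created earlier in this guide):

```shell
# --ephemeral keys register nodes that are cleaned up after inactivity
headscale -n myfirstnamespace preauthkeys create --ephemeral --expiration 24h
```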
|
||||
|
||||
Please bear in mind that all headscale commands support adding `-o json` or `-o json-line` to get nicely JSON-formatted output.
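
For example, to get the node list as JSON:

```shell
headscale nodes list -o json
```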
|
||||
|
||||
## Debugging headscale running in Docker
|
||||
|
||||
The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug your application running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.
|
||||
|
||||
### Running the debug Docker container
|
||||
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
|
||||
### Executing commands in the debug container
|
||||
|
||||
The default command in the debug container is to run `headscale`, which is located at `/bin/headscale` inside the container.
|
||||
|
||||
Additionally, the debug container includes a minimalist Busybox shell.
|
||||
|
||||
To launch a shell in the container, use:
|
||||
|
||||
```
|
||||
docker run -it headscale/headscale:x.x.x-debug sh
|
||||
```
|
||||
|
||||
You can also execute commands directly, such as `ls /bin` in this example:
|
||||
|
||||
```
|
||||
docker run headscale/headscale:x.x.x-debug ls /bin
|
||||
```
|
||||
|
||||
Using `docker exec` allows you to run commands in an existing container.
|
docs/acls.md (new file)
@@ -0,0 +1,176 @@
|
||||
# ACLs use case example
|
||||
|
||||
Let's build an example use case for a small business (it may be the place where
|
||||
ACLs are the most useful).
|
||||
|
||||
We have a small company with a boss, an admin, two developers and an intern.
|
||||
|
||||
The boss should have access to all servers but not to the users' hosts. The admin
|
||||
should also have access to all hosts except that their permissions should be
|
||||
limited to maintaining the hosts (for example purposes). The developers can do
|
||||
anything they want on dev hosts but only watch on production hosts. The intern
|
||||
can only interact with the development servers.
|
||||
|
||||
There's an additional server that acts as a router, connecting the VPN users
|
||||
to an internal network `10.20.0.0/16`. Developers must have access to those
|
||||
internal resources.
|
||||
|
||||
Each user has at least one device connected to the network, and we have some
|
||||
servers.
|
||||
|
||||
- database.prod
|
||||
- database.dev
|
||||
- app-server1.prod
|
||||
- app-server1.dev
|
||||
- billing.internal
|
||||
- router.internal
|
||||
|
||||

|
||||
|
||||
## ACL setup
|
||||
|
||||
Note: Namespaces will be created automatically when users authenticate with the
|
||||
Headscale server.
|
||||
|
||||
ACLs can be written either in [huJSON](https://github.com/tailscale/hujson)
|
||||
or YAML. Check the [test ACLs](../tests/acls) for further information.
|
||||
|
||||
When registering the servers we will need to add the flag
|
||||
`--advertised-tags=tag:<tag1>,tag:<tag2>`, and the user (namespace) that is
|
||||
registering the server should be allowed to do it. Since anyone can add tags to
|
||||
a server they can register, the check of the tags is done on the headscale server
|
||||
and only valid tags are applied. A tag is valid if the namespace that is
|
||||
registering it is allowed to do it.
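
As a sketch (the login URL is a placeholder, and note that current Tailscale clients spell the flag `--advertise-tags`), registering a production database server could look like:

```shell
# Run on the server being registered; its namespace must be allowed to own tag:prod-databases
tailscale up --login-server YOUR_HEADSCALE_URL --advertise-tags=tag:prod-databases
```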
|
||||
|
||||
Here are the ACLs to implement the same permissions as above:
|
||||
|
||||
```json
|
||||
{
|
||||
// groups are collections of users having a common scope. A user can be in multiple groups
|
||||
// groups cannot be composed of groups
|
||||
"groups": {
|
||||
"group:boss": ["boss"],
|
||||
"group:dev": ["dev1", "dev2"],
|
||||
"group:admin": ["admin1"],
|
||||
"group:intern": ["intern1"]
|
||||
},
|
||||
// tagOwners in tailscale is an association between a TAG and the people allowed to set this TAG on a server.
|
||||
// This is documented [here](https://tailscale.com/kb/1068/acl-tags#defining-a-tag)
|
||||
// and explained [here](https://tailscale.com/blog/rbac-like-it-was-meant-to-be/)
|
||||
"tagOwners": {
|
||||
// the administrators can add servers in production
|
||||
"tag:prod-databases": ["group:admin"],
|
||||
"tag:prod-app-servers": ["group:admin"],
|
||||
|
||||
// the boss can tag any server as internal
|
||||
"tag:internal": ["group:boss"],
|
||||
|
||||
// dev can add servers for dev purposes as well as admins
|
||||
"tag:dev-databases": ["group:admin", "group:dev"],
|
||||
"tag:dev-app-servers": ["group:admin", "group:dev"]
|
||||
|
||||
// interns cannot add servers
|
||||
},
|
||||
// hosts should be defined using their IP addresses and a subnet mask.
|
||||
// to define a single host, use a /32 mask. You cannot use DNS entries here,
|
||||
// as they're prone to be hijacked by replacing their IP addresses.
|
||||
// see https://github.com/tailscale/tailscale/issues/3800 for more information.
|
||||
"Hosts": {
|
||||
"postgresql.internal": "10.20.0.2/32",
|
||||
"webservers.internal": "10.20.10.1/29"
|
||||
},
|
||||
"acls": [
|
||||
// the boss has access to all servers
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:boss"],
|
||||
"dst": [
|
||||
"tag:prod-databases:*",
|
||||
"tag:prod-app-servers:*",
|
||||
"tag:internal:*",
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*"
|
||||
]
|
||||
},
|
||||
|
||||
// admins only have access to the administrative ports of the servers, on tcp/22
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admin"],
|
||||
"proto": "tcp",
|
||||
"dst": [
|
||||
"tag:prod-databases:22",
|
||||
"tag:prod-app-servers:22",
|
||||
"tag:internal:22",
|
||||
"tag:dev-databases:22",
|
||||
"tag:dev-app-servers:22"
|
||||
]
|
||||
},
|
||||
|
||||
// we also allow admin to ping the servers
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:admin"],
|
||||
"proto": "icmp",
|
||||
"dst": [
|
||||
"tag:prod-databases:*",
|
||||
"tag:prod-app-servers:*",
|
||||
"tag:internal:*",
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*"
|
||||
]
|
||||
},
|
||||
|
||||
// developers have access to database servers and application servers on all ports
|
||||
// they can only view the application servers in prod and have no access to database servers in production
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:dev"],
|
||||
"dst": [
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*",
|
||||
"tag:prod-app-servers:80,443"
|
||||
]
|
||||
},
|
||||
// developers have access to the internal network through the router.
|
||||
// the internal network is composed of HTTPS endpoints and Postgresql
|
||||
// database servers. There's an additional rule to allow traffic to be
|
||||
// forwarded to the internal subnet, 10.20.0.0/16. See this issue
|
||||
// https://github.com/juanfont/headscale/issues/502
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:dev"],
|
||||
"dst": ["10.20.0.0/16:443,5432", "router.internal:0"]
|
||||
},
|
||||
|
||||
// servers should be able to talk to databases on tcp/5432. Databases should not be able to initiate connections to
|
||||
// application servers
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["tag:dev-app-servers"],
|
||||
"proto": "tcp",
|
||||
"dst": ["tag:dev-databases:5432"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["tag:prod-app-servers"],
|
||||
"dst": ["tag:prod-databases:5432"]
|
||||
},
|
||||
|
||||
// interns have access to dev-app-servers only in reading mode
|
||||
{
|
||||
"action": "accept",
|
||||
"src": ["group:intern"],
|
||||
"dst": ["tag:dev-app-servers:80,443"]
|
||||
},
|
||||
|
||||
// We still have to allow internal namespace communications since nothing guarantees that each user has
|
||||
// their own namespace.
|
||||
{ "action": "accept", "src": ["boss"], "dst": ["boss:*"] },
|
||||
{ "action": "accept", "src": ["dev1"], "dst": ["dev1:*"] },
|
||||
{ "action": "accept", "src": ["dev2"], "dst": ["dev2:*"] },
|
||||
{ "action": "accept", "src": ["admin1"], "dst": ["admin1:*"] },
|
||||
{ "action": "accept", "src": ["intern1"], "dst": ["intern1:*"] }
|
||||
]
|
||||
}
|
||||
```
|
docs/build-headscale-container.md (new file)
@@ -0,0 +1,32 @@
|
||||
# Build docker from scratch
|
||||
|
||||
The Dockerfiles included in the repository use the [buildx plugin](https://docs.docker.com/buildx/working-with-buildx/). This plugin is included in Docker newer than Docker CE CLI 19.03.2 and is used to build containers for different architectures. Building the Dockerfiles without buildx is not possible.
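
To check whether your Docker CLI already ships the buildx plugin, a quick test is:

```
$ docker buildx version
```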
|
||||
|
||||
# Build native
|
||||
|
||||
To build the container for the native architecture you can just use:
|
||||
```
|
||||
$ sudo docker buildx build -t headscale:custom-arch .
|
||||
```
|
||||
|
||||
For example, this will build an amd64 (x86_64) container if your host system is amd64 (x86_64), or an arm64 container on an arm64 host system (e.g., a Raspberry Pi 4).
|
||||
|
||||
# Build cross platform
|
||||
|
||||
To build an arm64 container on an amd64 host system you could use:
|
||||
```
|
||||
$ sudo docker buildx build --platform linux/arm64 -t headscale:custom-arm64 .
|
||||
|
||||
```
|
||||
|
||||
**Important: arm32 builds are currently not supported, as there is a problem with a library used by headscale. Hopefully this will be fixed soon.**
|
||||
|
||||
# Build multiple arches
|
||||
|
||||
To build multiple architectures you could use:
|
||||
|
||||
```
|
||||
$ sudo docker buildx create --use
|
||||
$ sudo docker buildx build --platform linux/amd64,linux/arm64 .
|
||||
|
||||
```
|
docs/examples/README.md (new file)
@@ -0,0 +1,5 @@
|
||||
# Examples
|
||||
|
||||
This directory contains examples on how to run `headscale` on different platforms.
|
||||
|
||||
All examples are provided by the community and they are not verified by the `headscale` authors.
|
@@ -1,5 +1,7 @@
|
||||
# Deploying headscale on Kubernetes
|
||||
|
||||
**Note:** This is contributed by the community and not verified by the headscale authors.
|
||||
|
||||
This directory contains [Kustomize](https://kustomize.io) templates that deploy
|
||||
headscale in various configurations.
|
||||
|
||||
@@ -66,7 +68,7 @@ tasks like creating namespaces, authkeys, etc.
|
||||
|
||||
headscale is an open source implementation of the Tailscale control server
|
||||
|
||||
https://gitlab.com/juanfont/headscale
|
||||
https://github.com/juanfont/headscale
|
||||
|
||||
Usage:
|
||||
headscale [command]
|
@@ -5,4 +5,5 @@ metadata:
|
||||
data:
|
||||
server_url: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
|
||||
listen_addr: "0.0.0.0:8080"
|
||||
metrics_listen_addr: "127.0.0.1:9090"
|
||||
ephemeral_node_inactivity_timeout: "30m"
|
@@ -25,6 +25,11 @@ spec:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: METRICS_LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: metrics_listen_addr
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
@@ -26,6 +26,11 @@ spec:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: METRICS_LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: metrics_listen_addr
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
docs/glossary.md (new file)
@@ -0,0 +1,6 @@
|
||||
# Glossary
|
||||
|
||||
| Term | Description |
|
||||
| --------- | --------------------------------------------------------------------------------------------------------------------- |
|
||||
| Machine | A machine is a single entity connected to `headscale`, typically an installation of Tailscale. Also known as **Node** |
|
||||
| Namespace | A namespace is a logical grouping of machines "owned" by the same entity, in Tailscale, this is typically a User |
|
docs/images/headscale-acl-network.png (new binary file, 56 KiB)
docs/images/windows-registry.png (new binary file, 101 KiB)
docs/proposals/001-acls.md (new file)
@@ -0,0 +1,362 @@
|
||||
# ACLs
|
||||
|
||||
A key component of tailscale is the notion of Tailnet. This notion is hidden
|
||||
but the implications it has on how to use tailscale are not.
|
||||
|
||||
For tailscale, a [tailnet](https://tailscale.com/kb/1136/tailnet/) is the
|
||||
following:
|
||||
|
||||
> For personal users, you are a tailnet of many devices and one person. Each
|
||||
> device gets a private Tailscale IP address in the CGNAT range and every
|
||||
> device can talk directly to every other device, wherever they are on the
|
||||
> internet.
|
||||
>
|
||||
> For businesses and organizations, a tailnet is many devices and many users.
|
||||
> It can be based on your Microsoft Active Directory, your Google Workspace, a
|
||||
> GitHub organization, Okta tenancy, or other identity provider namespace. All
|
||||
> of the devices and users in your tailnet can be seen by the tailnet
|
||||
> administrators in the Tailscale admin console. There you can apply
|
||||
> tailnet-wide configuration, such as ACLs that affect visibility of devices
|
||||
> inside your tailnet, DNS settings, and more.
|
||||
|
||||
## Current implementation and issues
|
||||
|
||||
Currently in headscale, namespaces are used both as tailnets and as users. The
|
||||
issue is that if we want to use the ACLs we can't use both at the same time.
|
||||
|
||||
Tailnets cannot communicate with each other, so we can't have an ACL that
|
||||
authorizes tailnet (namespace) A to talk to tailnet (namespace) B.
|
||||
|
||||
We also can't write ACLs based on the users (namespaces in headscale) since all
|
||||
devices belong to the same user.
|
||||
|
||||
With the current implementation, the only ACL approach we can use is to associate
|
||||
each headscale IP with a host manually, then write the ACLs according to this
|
||||
manual mapping.
|
||||
|
||||
```json
|
||||
{
|
||||
"hosts": {
|
||||
"host1": "100.64.0.1",
|
||||
"server": "100.64.0.2"
|
||||
},
|
||||
"acls": [
|
||||
{ "action": "accept", "users": ["host1"], "ports": ["host2:80,443"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
While this works, it requires a lot of manual editing of the configuration and
|
||||
keeping track of all devices' IP addresses.
|
||||
|
||||
## Proposition for a next implementation
|
||||
|
||||
In order to ease the use of ACLs we need to split the tailnet and user
|
||||
notions.
|
||||
|
||||
A solution could be to consider a headscale server (in its entirety) as a
|
||||
tailnet.
|
||||
|
||||
For personal users the default behavior could either allow all communications
|
||||
between all namespaces (like tailscale) or disallow all communications between
|
||||
namespaces (current behavior).
|
||||
|
||||
For businesses and organisations, viewing a headscale instance as a single tailnet
|
||||
would allow users (namespaces) to talk to each other with the ACLs. As described
|
||||
in tailscale's documentation [[1]], a server should be tagged and personal
|
||||
devices should be tied to a user. Translated into headscale's terms, each user can
|
||||
have multiple devices and all those devices should be in the same namespace.
|
||||
The servers should be tagged and used as such.
|
||||
|
||||
This implementation would render useless the sharing feature that is currently
|
||||
implemented, since an ACL could do the same. Offering only one user
|
||||
interface to do one thing is simpler and less confusing for users.
|
||||
|
||||
To better suit the ACLs in this proposition, it's advised to consider that each
|
||||
namespace belongs to one person. This person can have multiple devices; they
|
||||
will all be considered as the same user in the ACLs. The OIDC feature wouldn't need
|
||||
to map people to namespaces, just create a namespace if the person isn't
|
||||
registered yet.
|
||||
|
||||
As a side note, users would like to write ACLs as YAML. We should offer users
|
||||
the ability to write rules in either format (HuJSON or YAML).
|
||||
|
||||
[1]: https://tailscale.com/kb/1068/acl-tags/
|
||||
|
||||
## Example
|
||||
|
||||
Let's build an example use case for a small business (it may be the place where
|
||||
ACLs are the most useful).
|
||||
|
||||
We have a small company with a boss, an admin, two developers and an intern.
|
||||
|
||||
The boss should have access to all servers but not to the users' hosts. The admin
|
||||
should also have access to all hosts except that their permissions should be
|
||||
limited to maintaining the hosts (for example purposes). The developers can do
|
||||
anything they want on dev hosts, but only watch on production hosts. The intern
|
||||
can only interact with the development servers.
|
||||
|
||||
Each user has at least one device connected to the network, and we have some
|
||||
servers.
|
||||
|
||||
- database.prod
|
||||
- database.dev
|
||||
- app-server1.prod
|
||||
- app-server1.dev
|
||||
- billing.internal
|
||||
|
||||
### Current headscale implementation
|
||||
|
||||
Let's create some namespaces
|
||||
|
||||
```bash
|
||||
headscale namespaces create prod
|
||||
headscale namespaces create dev
|
||||
headscale namespaces create internal
|
||||
headscale namespaces create users
|
||||
|
||||
headscale nodes register -n users boss-computer
|
||||
headscale nodes register -n users admin1-computer
|
||||
headscale nodes register -n users dev1-computer
|
||||
headscale nodes register -n users dev1-phone
|
||||
headscale nodes register -n users dev2-computer
|
||||
headscale nodes register -n users intern1-computer
|
||||
|
||||
headscale nodes register -n prod database
|
||||
headscale nodes register -n prod app-server1
|
||||
|
||||
headscale nodes register -n dev database
|
||||
headscale nodes register -n dev app-server1
|
||||
|
||||
headscale nodes register -n internal billing
|
||||
|
||||
headscale nodes list
|
||||
ID | Name | Namespace | IP address
|
||||
1 | boss-computer | users | 100.64.0.1
|
||||
2 | admin1-computer | users | 100.64.0.2
|
||||
3 | dev1-computer | users | 100.64.0.3
|
||||
4 | dev1-phone | users | 100.64.0.4
|
||||
5 | dev2-computer | users | 100.64.0.5
|
||||
6 | intern1-computer | users | 100.64.0.6
|
||||
7 | database | prod | 100.64.0.7
|
||||
8 | app-server1 | prod | 100.64.0.8
|
||||
9 | database | dev | 100.64.0.9
|
||||
10 | app-server1 | dev | 100.64.0.10
|
||||
11 | internal | internal | 100.64.0.11
|
||||
```
|
||||
|
||||
In order to only allow the communications related to our description above we
|
||||
need to add the following ACLs
|
||||
|
||||
```json
|
||||
{
|
||||
"hosts": {
|
||||
"boss-computer": "100.64.0.1",
|
||||
"admin1-computer": "100.64.0.2",
|
||||
"dev1-computer": "100.64.0.3",
|
||||
"dev1-phone": "100.64.0.4",
|
||||
"dev2-computer": "100.64.0.5",
|
||||
"intern1-computer": "100.64.0.6",
|
||||
"prod-app-server1": "100.64.0.8"
|
||||
},
|
||||
"groups": {
|
||||
"group:dev": ["dev1-computer", "dev1-phone", "dev2-computer"],
|
||||
"group:admin": ["admin1-computer"],
|
||||
"group:boss": ["boss-computer"],
|
||||
"group:intern": ["intern1-computer"]
|
||||
},
|
||||
"acls": [
|
||||
// boss have access to all servers but no users hosts
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:boss"],
|
||||
"ports": ["prod:*", "dev:*", "internal:*"]
|
||||
},
|
||||
|
||||
// admins have access to the administration port (let's only consider port 22 here)
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:admin"],
|
||||
"ports": ["prod:22", "dev:22", "internal:22"]
|
||||
},
|
||||
|
||||
// dev can do anything on dev servers and check access on prod servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:dev"],
|
||||
"ports": ["dev:*", "prod-app-server1:80,443"]
|
||||
},
|
||||
|
||||
// interns only have access to port 80 and 443 on dev servers (lame internship)
|
||||
{ "action": "accept", "users": ["group:intern"], "ports": ["dev:80,443"] },
|
||||
|
||||
// users can access their own devices
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["dev1-computer"],
|
||||
"ports": ["dev1-phone:*"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["dev1-phone"],
|
||||
"ports": ["dev1-computer:*"]
|
||||
},
|
||||
|
||||
// internal namespace communications should still be allowed within the namespace
|
||||
{ "action": "accept", "users": ["dev"], "ports": ["dev:*"] },
|
||||
{ "action": "accept", "users": ["prod"], "ports": ["prod:*"] },
|
||||
{ "action": "accept", "users": ["internal"], "ports": ["internal:*"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Since communication between namespaces isn't possible, we also have to share the
|
||||
devices between the namespaces.
|
||||
|
||||
```bash
|
||||
|
||||
// add boss host to prod, dev and internal network
|
||||
headscale nodes share -i 1 -n prod
|
||||
headscale nodes share -i 1 -n dev
|
||||
headscale nodes share -i 1 -n internal
|
||||
|
||||
// add admin computer to prod, dev and internal network
|
||||
headscale nodes share -i 2 -n prod
|
||||
headscale nodes share -i 2 -n dev
|
||||
headscale nodes share -i 2 -n internal
|
||||
|
||||
// add all dev to prod and dev network
|
||||
headscale nodes share -i 3 -n dev
|
||||
headscale nodes share -i 4 -n dev
|
||||
headscale nodes share -i 3 -n prod
|
||||
headscale nodes share -i 4 -n prod
|
||||
headscale nodes share -i 5 -n dev
|
||||
headscale nodes share -i 5 -n prod
|
||||
|
||||
headscale nodes share -i 6 -n dev
|
||||
```
|
||||
|
||||
This fake network has not been tested, but it should work. Operating it could
|
||||
be quite tedious if the company grows. Each time a new user joins, we have to add
|
||||
them to a group and share their devices with the correct namespaces. If the user wants
|
||||
multiple devices we have to allow communication to each of them one by one. If
|
||||
the business changes its organisation, we may have to rewrite all ACLs
|
||||
and reorganise all namespaces.
|
||||
|
||||
If we add servers in production we should also update the ACLs to allow dev
|
||||
access to certain categories of them (only app servers, for example).
|
||||
|
||||
### Example based on the proposition in this document
|
||||
|
||||
Let's create the namespaces
|
||||
|
||||
```bash
|
||||
headscale namespaces create boss
|
||||
headscale namespaces create admin1
|
||||
headscale namespaces create dev1
|
||||
headscale namespaces create dev2
|
||||
headscale namespaces create intern1
|
||||
```
|
||||
|
||||
We don't need to create namespaces for the servers because the servers will be
|
||||
tagged. When registering the servers we will need to add the flag
|
||||
`--advertised-tags=tag:<tag1>,tag:<tag2>`, and the user (namespace) that is
|
||||
registering the server should be allowed to do it. Since anyone can add tags to
|
||||
a server they can register, the check of the tags is done on the headscale server
|
||||
and only valid tags are applied. A tag is valid if the namespace that is
|
||||
registering it is allowed to do it.
|
||||
|
||||
Here are the ACLs to implement the same permissions as above:
|
||||
|
||||
```json
|
||||
{
|
||||
// groups are simpler and only list the namespaces name
|
||||
"groups": {
|
||||
"group:boss": ["boss"],
|
||||
"group:dev": ["dev1", "dev2"],
|
||||
"group:admin": ["admin1"],
|
||||
"group:intern": ["intern1"]
|
||||
},
|
||||
"tagOwners": {
|
||||
// the administrators can add servers in production
|
||||
"tag:prod-databases": ["group:admin"],
|
||||
"tag:prod-app-servers": ["group:admin"],
|
||||
|
||||
// the boss can tag any server as internal
|
||||
"tag:internal": ["group:boss"],
|
||||
|
||||
// dev can add servers for dev purposes as well as admins
|
||||
"tag:dev-databases": ["group:admin", "group:dev"],
|
||||
"tag:dev-app-servers": ["group:admin", "group:dev"]
|
||||
|
||||
// interns cannot add servers
|
||||
},
|
||||
"acls": [
|
||||
// boss have access to all servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:boss"],
|
||||
"ports": [
|
||||
"tag:prod-databases:*",
|
||||
"tag:prod-app-servers:*",
|
||||
"tag:internal:*",
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*"
|
||||
]
|
||||
},
|
||||
|
||||
// admin have only access to administrative ports of the servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:admin"],
|
||||
"ports": [
|
||||
"tag:prod-databases:22",
|
||||
"tag:prod-app-servers:22",
|
||||
"tag:internal:22",
|
||||
"tag:dev-databases:22",
|
||||
"tag:dev-app-servers:22"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:dev"],
|
||||
"ports": [
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*",
|
||||
"tag:prod-app-servers:80,443"
|
||||
]
|
||||
},
|
||||
|
||||
// servers should be able to talk to database. Database should not be able to initiate connections to server
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["tag:dev-app-servers"],
|
||||
"ports": ["tag:dev-databases:5432"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["tag:prod-app-servers"],
|
||||
"ports": ["tag:prod-databases:5432"]
|
||||
},
|
||||
|
||||
// interns have access to dev-app-servers only in reading mode
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:intern"],
|
||||
"ports": ["tag:dev-app-servers:80,443"]
|
||||
},
|
||||
|
||||
// we still have to allow internal namespace communications since nothing guarantees that each user has their own namespace. This could be talked over.
|
||||
{ "action": "accept", "users": ["boss"], "ports": ["boss:*"] },
|
||||
{ "action": "accept", "users": ["dev1"], "ports": ["dev1:*"] },
|
||||
{ "action": "accept", "users": ["dev2"], "ports": ["dev2:*"] },
|
||||
{ "action": "accept", "users": ["admin1"], "ports": ["admin1:*"] },
|
||||
{ "action": "accept", "users": ["intern1"], "ports": ["intern1:*"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
With this implementation, the sharing step is not necessary. The maintenance cost
|
||||
of the ACL file is lower and less tedious (no need to map hostnames and IPs
|
||||
into it).
|
docs/remote-cli.md (new file)
@@ -0,0 +1,100 @@
|
||||
# Controlling `headscale` with remote CLI
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- A workstation to run `headscale` (could be Linux, macOS, other supported platforms)
|
||||
- A `headscale` server (version `0.13.0` or newer)
|
||||
- Access to create API keys (local access to the `headscale` server)
|
||||
- `headscale` _must_ be served over TLS/HTTPS
|
||||
- Remote access does _not_ support unencrypted traffic.
|
||||
- Port `50443` must be open in the firewall (or the port overridden by the `grpc_listen_addr` option)
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to control a `headscale` instance
|
||||
from a remote machine with the `headscale` command line binary.
|
||||
|
||||
## Create an API key
|
||||
|
||||
We need to create an API key to authenticate our remote `headscale` when using it from our workstation.
|
||||
|
||||
To create an API key, log into your `headscale` server and generate a key:
|
||||
|
||||
```shell
|
||||
headscale apikeys create --expiration 90d
|
||||
```
|
||||
|
||||
Copy the output of the command and save it for later. Please note that you cannot retrieve a key again;
|
||||
if the key is lost, expire the old one and create a new key.
|
||||
|
||||
To list the keys currently associated with the server:
|
||||
|
||||
```shell
|
||||
headscale apikeys list
|
||||
```
|
||||
|
||||
and to expire a key:
|
||||
|
||||
```shell
|
||||
headscale apikeys expire --prefix "<PREFIX>"
|
||||
```
|
||||
|
||||
## Download and configure `headscale`
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
|
||||
3. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
4. Configure the CLI through Environment Variables
|
||||
|
||||
```shell
|
||||
export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:<PORT>"
|
||||
export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
|
||||
```
|
||||
|
||||
for example:
|
||||
|
||||
```shell
|
||||
export HEADSCALE_CLI_ADDRESS="headscale.example.com:50443"
|
||||
export HEADSCALE_CLI_API_KEY="abcde12345"
|
||||
```
|
||||
|
||||
This will tell the `headscale` binary to connect to a remote instance, instead of looking
|
||||
for a local instance (which is what it does on the server).
|
||||
|
||||
The API key is needed to make sure that you are allowed to access the server. The key is _not_
|
||||
needed when running directly on the server, as the connection is local.
|
||||
|
||||
5. Test the connection
|
||||
|
||||
Let us run the headscale command to verify that we can connect by listing our nodes:
|
||||
|
||||
```shell
|
||||
headscale nodes list
|
||||
```
|
||||
|
||||
You should now be able to see a list of your nodes from your workstation, and you can
|
||||
now control the `headscale` server from your workstation.
|
||||
|
||||
## Behind a proxy
|
||||
|
||||
It is possible to run the gRPC remote endpoint behind a reverse proxy, like Nginx, and have it run on the _same_ port as `headscale`.
|
||||
|
||||
While this is _not a supported_ feature, an example on how this can be set up on
|
||||
[NixOS is shown here](https://github.com/kradalby/dotfiles/blob/4489cdbb19cddfbfae82cd70448a38fde5a76711/machines/headscale.oracldn/headscale.nix#L61-L91).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Checklist:
|
||||
|
||||
- Make sure you have the _same_ `headscale` version on your server and workstation
|
||||
- Make sure you use version `0.13.0` or newer.
|
||||
- Verify that your TLS certificate is valid and trusted
|
||||
- If you do not have access to a trusted certificate (e.g. from Let's Encrypt), add your self-signed certificate to the trust store of your OS or
|
||||
- Set `HEADSCALE_CLI_INSECURE` to 0 in your environment
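
A quick way to work through the first two points (hostname and port below are examples, adjust them to your setup):

```shell
# Compare versions; they should match on the server and on the workstation
headscale version

# Inspect the certificate presented on the gRPC port
openssl s_client -connect headscale.example.com:50443 </dev/null
```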
|
docs/running-headscale-container.md (new file)
@@ -0,0 +1,163 @@
|
||||
# Running headscale in a container
|
||||
|
||||
**Note:** the container documentation is maintained by the _community_ and there is no guarantee
|
||||
it is up to date or working.
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to set up and run `headscale` in a container.
|
||||
[Docker](https://www.docker.com) is used as the reference container implementation, but there is no reason that it should
|
||||
not work with alternatives like [Podman](https://podman.io). The Docker image can be found on Docker Hub [here](https://hub.docker.com/r/headscale/headscale).
|
||||
|
||||
## Configure and run `headscale`
|
||||
|
||||
1. Prepare a directory on the host Docker node in your directory of choice, used to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir -p ./headscale/config
|
||||
cd ./headscale
|
||||
```
|
||||
|
||||
2. Create an empty SQLite database in the headscale directory:
|
||||
|
||||
```shell
|
||||
touch ./config/db.sqlite
|
||||
```
|
||||
|
||||
3. **(Strongly Recommended)** Download a copy of the [example configuration](../config-example.yaml) from the [headscale repository](https://github.com/juanfont/headscale/).
|
||||
|
||||
Using wget:
|
||||
|
||||
```shell
|
||||
wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
|
||||
```
|
||||
|
||||
Using curl:
|
||||
|
||||
```shell
|
||||
curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml
|
||||
```
|
||||
|
||||
**(Advanced)** If you would like to hand craft a config file **instead** of downloading the example config file, create a blank `headscale` configuration in the headscale directory to edit:
|
||||
|
||||
```shell
|
||||
touch ./config/config.yaml
|
||||
```
|
||||
|
||||
Modify the config file to your preferences before launching Docker container.
|
||||
Here are some settings that you likely want:
|
||||
|
||||
```yaml
|
||||
server_url: http://your-host-name:8080 # Change to your hostname or host IP
|
||||
# Listen to 0.0.0.0 so it's accessible outside the container
|
||||
metrics_listen_addr: 0.0.0.0:9090
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
private_key_path: /etc/headscale/private.key
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
db_path: /etc/headscale/db.sqlite
|
||||
```
|
||||
|
||||
4. Start the headscale server while working in the host headscale directory:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--rm \
|
||||
--volume $(pwd)/config:/etc/headscale/ \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
--publish 127.0.0.1:9090:9090 \
|
||||
headscale/headscale:<VERSION> \
|
||||
headscale serve
|
||||
|
||||
```
|
||||
|
||||
Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally.
|
||||
|
||||
This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
|
||||
`headscale` instance becomes available and then detach so headscale runs in the background.
|
||||
|
||||
5. Verify `headscale` is running:
|
||||
|
||||
Follow the container logs:
|
||||
|
||||
```shell
|
||||
docker logs --follow headscale
|
||||
```
|
||||
|
||||
Verify running containers:
|
||||
|
||||
```shell
|
||||
docker ps
|
||||
```
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:9090/metrics
|
||||
```
|
||||
|
||||
6. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
|
||||
|
||||
```shell
|
||||
docker exec headscale \
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
To register a machine when running `headscale` in a container, take the headscale command and pass it to the container:
|
||||
|
||||
```shell
|
||||
docker exec headscale \
|
||||
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register a machine using a pre-authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
|
||||
```shell
|
||||
docker exec headscale \
|
||||
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
## Debugging headscale running in Docker

The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug your application running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.

### Running the debug Docker container

To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.

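For instance, the `docker run` command from step 4 above becomes the following with the debug image (same flags, only the tag changes):

```shell
docker run \
  --name headscale \
  --detach \
  --rm \
  --volume $(pwd)/config:/etc/headscale/ \
  --publish 127.0.0.1:8080:8080 \
  --publish 127.0.0.1:9090:9090 \
  headscale/headscale:<VERSION>-debug \
  headscale serve
```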

### Executing commands in the debug container

The default command in the debug container is to run `headscale`, which is located at `/bin/headscale` inside the container.

Additionally, the debug container includes a minimalist Busybox shell.

To launch a shell in the container, use:

```shell
docker run -it headscale/headscale:x.x.x-debug sh
```

You can also execute commands directly, such as `ls /bin` in this example:

```shell
docker run headscale/headscale:x.x.x-debug ls /bin
```

Using `docker exec` allows you to run commands in an existing container.
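
For example, a short sketch of both patterns against the container started above (the interactive shell assumes the `-debug` image):

```shell
# Run a headscale CLI command inside the running container
docker exec headscale headscale nodes list

# Open an interactive Busybox shell in the running container (-debug image only)
docker exec -it headscale sh
```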

191 docs/running-headscale-linux.md Normal file
@@ -0,0 +1,191 @@

# Running headscale on Linux

## Goal

This documentation has the goal of showing a user how to set up and run `headscale` on Linux.
In addition to the "get up and running" section, there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd)
describing how to make `headscale` run properly in a server environment.

## Configure and run `headscale`

1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):

```shell
wget --output-document=/usr/local/bin/headscale \
  https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
```

2. Make `headscale` executable:

```shell
chmod +x /usr/local/bin/headscale
```

3. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:

```shell
# Directory for configuration
mkdir -p /etc/headscale

# Directory for the database and other variable data (like certificates)
mkdir -p /var/lib/headscale

# or, if you create a headscale user:
useradd \
  --create-home \
  --home-dir /var/lib/headscale/ \
  --system \
  --user-group \
  --shell /usr/bin/nologin \
  headscale
```

4. Create an empty SQLite database:

```shell
touch /var/lib/headscale/db.sqlite
```

5. Create a `headscale` configuration:

```shell
touch /etc/headscale/config.yaml
```

It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
from the [headscale repository](../).

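For example, the example configuration can be fetched directly into place with the same URL used in the container guide above (a sketch; point the URL at a release tag instead of `main` if you want the config matching a specific version):

```shell
wget -O /etc/headscale/config.yaml \
  https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
```
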
6. Start the headscale server:

```shell
headscale serve
```

This command will start `headscale` in the current terminal session.

---

To continue the tutorial, open a new terminal and let it run in the background.
Alternatively, use a terminal multiplexer like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/).

To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing.

7. Verify `headscale` is running:

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

8. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

```shell
headscale namespaces create myfirstnamespace
```

### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:

```shell
tailscale up --login-server YOUR_HEADSCALE_URL
```

Register the machine:

```shell
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
```

### Register a machine using a pre-authenticated key

Generate a key using the command line:

```shell
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

## Running `headscale` in the background with SystemD

This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/).
This should work on most modern Linux distributions.

1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing:

```systemd
[Unit]
Description=headscale controller
After=syslog.target
After=network.target

[Service]
Type=simple
User=headscale
Group=headscale
ExecStart=/usr/local/bin/headscale serve
Restart=always
RestartSec=5

# Optional security enhancements
NoNewPrivileges=yes
PrivateTmp=yes
ProtectSystem=strict
ProtectHome=yes
ReadWritePaths=/var/lib/headscale /var/run/headscale
AmbientCapabilities=CAP_NET_BIND_SERVICE
RuntimeDirectory=headscale

[Install]
WantedBy=multi-user.target
```

Note that when running as the headscale user, ensure that you either add your current user to the headscale group:

```shell
usermod -a -G headscale current_user
```

or run all headscale commands as the headscale user:

```shell
su - headscale
```

2. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the `headscale` user or group:

```yaml
unix_socket: /var/run/headscale/headscale.sock
```

3. Reload SystemD to load the new configuration file:

```shell
systemctl daemon-reload
```

4. Enable and start the new `headscale` service:

```shell
systemctl enable --now headscale
```

5. Verify the headscale service:

```shell
systemctl status headscale
```

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

`headscale` will now run in the background and start at boot.
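
To follow the service logs, for example:

```shell
journalctl --unit headscale --follow
```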

206 docs/running-headscale-openbsd.md Normal file
@@ -0,0 +1,206 @@

# Running headscale on OpenBSD

## Goal

This documentation has the goal of showing a user how to install and run `headscale` on OpenBSD 7.1.
In addition to the "get up and running" section, there is an optional [rc.d section](#running-headscale-in-the-background-with-rcd)
describing how to make `headscale` run properly in a server environment.

## Install `headscale`

1. Install from ports (Not Recommended)

As of OpenBSD 7.1, there is a headscale package in the ports collection; however, it is severely outdated (v0.12.4).
You can install it via `pkg_add headscale`.

2. Install from source on OpenBSD 7.1

```shell
# Install prerequisites
# 1. go v1.18+: headscale newer than 0.15 needs go 1.18+ to compile
# 2. gmake: the Makefile in the headscale repo is written in GNU make syntax
pkg_add -D snap go
pkg_add gmake

git clone https://github.com/juanfont/headscale.git

cd headscale

# optionally check out a release
# option a. you can find the official release at https://github.com/juanfont/headscale/releases/latest
# option b. get the latest tag, this may be a beta release
latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)

git checkout $latestTag

gmake build

# make it executable
chmod a+x headscale

# copy it to /usr/local/sbin
cp headscale /usr/local/sbin
```

3. Install from source via cross compile

```shell
# Install prerequisites
# 1. go v1.18+: headscale newer than 0.15 needs go 1.18+ to compile
# 2. gmake: the Makefile in the headscale repo is written in GNU make syntax

git clone https://github.com/juanfont/headscale.git

cd headscale

# optionally check out a release
# option a. you can find the official release at https://github.com/juanfont/headscale/releases/latest
# option b. get the latest tag, this may be a beta release
latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)

git checkout $latestTag

make build GOOS=openbsd

# copy headscale to the OpenBSD machine and put it in /usr/local/sbin (see the example below)
```
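
A minimal sketch of that copy step, assuming SSH access to the target machine (`openbsd-host` is a placeholder hostname):

```shell
scp ./headscale root@openbsd-host:/usr/local/sbin/headscale
```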

## Configure and run `headscale`

1. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:

```shell
# Directory for configuration
mkdir -p /etc/headscale

# Directory for the database and other variable data (like certificates)
mkdir -p /var/lib/headscale
```

2. Create an empty SQLite database:

```shell
touch /var/lib/headscale/db.sqlite
```

3. Create a `headscale` configuration:

```shell
touch /etc/headscale/config.yaml
```

It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
from the [headscale repository](../).

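For example, OpenBSD's base `ftp(1)` can fetch the example configuration over HTTPS (a sketch; `curl` or `wget` from packages work just as well):

```shell
ftp -o /etc/headscale/config.yaml \
  https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
```
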
4. Start the headscale server:

```shell
headscale serve
```

This command will start `headscale` in the current terminal session.

---

To continue the tutorial, open a new terminal and let it run in the background.
Alternatively, use a terminal multiplexer like [tmux](https://github.com/tmux/tmux).

To run `headscale` in the background, please follow the steps in the [rc.d section](#running-headscale-in-the-background-with-rcd) before continuing.

5. Verify `headscale` is running:

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

6. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

```shell
headscale namespaces create myfirstnamespace
```

### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:

```shell
tailscale up --login-server YOUR_HEADSCALE_URL
```

Register the machine:

```shell
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
```

### Register a machine using a pre-authenticated key

Generate a key using the command line:

```shell
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

## Running `headscale` in the background with rc.d

This section demonstrates how to run `headscale` as a service in the background with [rc.d](https://man.openbsd.org/rc.d).

1. Create an rc.d service at `/etc/rc.d/headscale` containing:

```shell
#!/bin/ksh

daemon="/usr/local/sbin/headscale"
daemon_logger="daemon.info"
daemon_user="root"
daemon_flags="serve"
daemon_timeout=60

. /etc/rc.d/rc.subr

rc_bg=YES
rc_reload=NO

rc_cmd $1
```

2. `/etc/rc.d/headscale` needs execute permission:

```shell
chmod a+x /etc/rc.d/headscale
```

3. Start the `headscale` service:

```shell
rcctl start headscale
```

4. Make the `headscale` service start at boot:

```shell
rcctl enable headscale
```

5. Verify the headscale service:

```shell
rcctl check headscale
```

Verify `headscale` is available:

```shell
curl http://127.0.0.1:9090/metrics
```

`headscale` will now run in the background and start at boot.
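
Since the rc.d script above logs via `daemon.info`, the service output normally lands in the system daemon log (per OpenBSD's default syslog configuration), so it can be followed with, for example:

```shell
tail -f /var/log/daemon
```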

@@ -1,5 +1,9 @@
# Running the service via TLS (optional)

## Let's Encrypt / ACME

To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.

```yaml
tls_letsencrypt_hostname: ""
tls_letsencrypt_listen: ":http"
tls_letsencrypt_cache_dir: ".cache"
tls_letsencrypt_challenge_type: HTTP-01
```

### Challenge type HTTP-01

The default challenge type `HTTP-01` requires that headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.

If you need to change the ip and/or port used by headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.

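As an illustration only: on a Linux host where `tls_letsencrypt_listen` were set to, say, `:8080` (a hypothetical value), an iptables rule along these lines could redirect the validation traffic from port 80; adapt this to your actual firewall:

```shell
iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 8080
```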

### Challenge type TLS-ALPN-01

Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.

## Bring your own certificate

headscale can also be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```yaml
tls_cert_path: ""
tls_key_path: ""
```

### Configuring Mutual TLS Authentication (mTLS)

mTLS is a method by which an HTTPS server authenticates clients, e.g. Tailscale, using TLS certificates. This can be configured by applying one of the following values to the `tls_client_auth_mode` setting in the configuration file.

| Value               | Behavior                                                   |
| ------------------- | ---------------------------------------------------------- |
| `disabled`          | Disable mTLS.                                              |
| `relaxed` (default) | A client certificate is required, but it is not verified.  |
| `enforced`          | Requires clients to supply a certificate that is verified. |

```yaml
tls_client_auth_mode: ""
```

50 docs/windows-client.md Normal file
@@ -0,0 +1,50 @@

# Connecting a Windows client

## Goal

This documentation has the goal of showing how a user can use the official Windows [Tailscale](https://tailscale.com) client with `headscale`.

## Add registry keys

To make the Windows client behave as expected and to run well with `headscale`, two registry keys **must** be set:

- `HKLM:\SOFTWARE\Tailscale IPN\UnattendedMode` must be set to `always` as a `string` type, to allow Tailscale to run properly in the background
- `HKLM:\SOFTWARE\Tailscale IPN\LoginURL` must be set to `<YOUR HEADSCALE URL>` as a `string` type, to ensure Tailscale contacts the correct control server.



The Tailscale Windows client has been observed to reset its configuration on logout/reboot, and these two keys [resolve that issue](https://github.com/tailscale/tailscale/issues/2798).

For a guide on how to edit registry keys, [check out Computer Hope](https://www.computerhope.com/issues/ch001348.htm).
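
Alternatively, the same keys can be created from an elevated Command Prompt with `reg add` (a sketch; substitute your actual headscale URL):

```
reg add "HKLM\SOFTWARE\Tailscale IPN" /v UnattendedMode /t REG_SZ /d always
reg add "HKLM\SOFTWARE\Tailscale IPN" /v LoginURL /t REG_SZ /d "<YOUR HEADSCALE URL>"
```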

## Installation

Download the [Official Windows Client](https://tailscale.com/download/windows) and install it.

When the installation has finished, start Tailscale and log in (you might have to click the icon in the system tray).

The login should open a browser window and direct you to your `headscale` instance.

## Troubleshooting

If you are seeing repeated messages like:

```
[GIN] 2022/02/10 - 16:39:34 | 200 | 1.105306ms | 127.0.0.1 | POST "/machine/redacted"
```

in your `headscale` output, turn on `DEBUG` logging and look for:

```
2022-02-11T00:59:29Z DBG Machine registration has expired. Sending a authurl to register machine=redacted
```

This typically means that the registry keys above were not set appropriately.

To reset and try again, it is important to do the following:

1. Ensure the registry keys from the previous guide are correctly set.
2. Shut down the Tailscale service (or the client running in the tray).
3. Delete the Tailscale application data folder, located at `C:\Users\<USERNAME>\AppData\Local\Tailscale`, and try to connect again.
4. Ensure the Windows node is deleted from headscale (to ensure a fresh setup).
5. Start Tailscale on the Windows machine and retry the login.

43 flake.lock generated Normal file
@@ -0,0 +1,43 @@

{
  "nodes": {
    "flake-utils": {
      "locked": {
        "lastModified": 1653893745,
        "narHash": "sha256-0jntwV3Z8//YwuOjzhV2sgJJPt+HY6KhU7VZUL0fKZQ=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "1ed9fb1935d260de5fe1c2f7ee0ebaae17ed2fa1",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs": {
      "locked": {
        "lastModified": 1654847188,
        "narHash": "sha256-MC+eP7XOGE1LAswOPqdcGoUqY9mEQ3ZaaxamVTbc0hM=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "8b66e3f2ebcc644b78cec9d6f152192f4e7d322f",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-22.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
Some files were not shown because too many files have changed in this diff.