Mirror of https://github.com/juanfont/headscale.git (synced 2025-12-15 16:31:49 +00:00)

Compare commits: v0.8.1 ... v0.13.0-be (790 commits)
(first changed file, not named in the compare view)

@@ -15,3 +15,4 @@ README.md
 LICENSE
 .vscode

+*.sock
.github/FUNDING.yml (new file, 2 lines)

ko_fi: kradalby
github: [kradalby]
.github/ISSUE_TEMPLATE/bug_report.md (new file, 28 lines)

---
name: "Bug report"
about: "Create a bug report to help us improve"
title: ""
labels: ["bug"]
assignees: ""
---

**Bug description**

<!-- A clear and concise description of what the bug is. Describe the expected bahavior
and how it is currently different. If you are unsure if it is a bug, consider discussing
it on our Discord server first. -->

**To Reproduce**

<!-- Steps to reproduce the behavior. -->

**Context info**

<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->
.github/ISSUE_TEMPLATE/config.yml (new file, 11 lines)

# Issues must have some content
blank_issues_enabled: false

# Contact links
contact_links:
  - name: "headscale usage documentation"
    url: "https://github.com/juanfont/headscale/blob/main/docs"
    about: "Find documentation about how to configure and run headscale."
  - name: "headscale Discord community"
    url: "https://discord.com/invite/XcQxk2VHjx"
    about: "Please ask and answer questions about usage of headscale here."
.github/ISSUE_TEMPLATE/feature_request.md (new file, 15 lines)

---
name: "Feature request"
about: "Suggest an idea for headscale"
title: ""
labels: ["enhancement"]
assignees: ""
---

**Feature request**

<!-- A clear and precise description of what new or changed feature you want. -->

<!-- Please include the reason, why you would need the feature. E.g. what problem
does it solve? Or which workflow is currently frustrating and will be improved by
this? -->
.github/ISSUE_TEMPLATE/other_issue.md (new file, 28 lines)

---
name: "Other issue"
about: "Report a different issue"
title: ""
labels: ["bug"]
assignees: ""
---

<!-- If you have a question, please consider using our Discord for asking questions -->

**Issue description**

<!-- Please add your issue description. -->

**To Reproduce**

<!-- Steps to reproduce the behavior. -->

**Context info**

<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->
.github/pull_request_template.md (new file, 10 lines)

<!-- Please tick if the following things apply. You… -->

- [] read the [CONTRIBUTING guidelines](README.md#user-content-contributing)
- [] raised a GitHub issue or discussed it on the projects chat beforehand
- [] added unit tests
- [] added integration tests
- [] updated documentation if needed
- [] updated CHANGELOG.md

<!-- If applicable, please reference the issue using `Fixes #XXX` and add tests to cover your new code. -->
.github/workflows/build.yml (new file, 51 lines)

name: Build

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - name: Setup Go
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/setup-go@v2
        with:
          go-version: "1.17.7"

      - name: Install dependencies
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          go version
          sudo apt update
          sudo apt install -y make

      - name: Run build
        if: steps.changed-files.outputs.any_changed == 'true'
        run: make build

      - uses: actions/upload-artifact@v2
        if: steps.changed-files.outputs.any_changed == 'true'
        with:
          name: headscale-linux
          path: headscale
.github/workflows/contributors.yml (new file, 24 lines)

name: Contributors

on:
  push:
    branches:
      - main

jobs:
  add-contributors:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: BobAnkh/add-contributors@master
        with:
          CONTRIBUTOR: "## Contributors"
          COLUMN_PER_ROW: "6"
          ACCESS_TOKEN: ${{secrets.GITHUB_TOKEN}}
          IMG_WIDTH: "100"
          FONT_SIZE: "14"
          PATH: "/README.md"
          COMMIT_MESSAGE: "docs(README): update contributors"
          AVATAR_SHAPE: "round"
          BRANCH: "update-contributors"
          PULL_REQUEST: "main"
.github/workflows/lint.yml (87 lines changed)

@@ -1,39 +1,74 @@
+---
 name: CI

 on: [push, pull_request]

 jobs:
-  # The "build" workflow
-  lint:
-    # The type of runner that the job will run on
+  golangci-lint:
     runs-on: ubuntu-latest
-
-    # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
-
-      # Install and run golangci-lint as a separate step, it's much faster this
-      # way because this action has caching. It'll get run again in `make lint`
-      # below, but it's still much faster in the end than installing
-      # golangci-lint manually in the `Run lint` step.
-      - uses: golangci/golangci-lint-action@v2
         with:
-          args: --timeout 5m
+          fetch-depth: 2

-      # Setup Go
-      - name: Setup Go
-        uses: actions/setup-go@v2
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@v14.1
         with:
-          go-version: "1.16.3" # The Go version to download (if necessary) and use.
+          files: |
+            go.*
+            **/*.go
+            integration_test/
+            config-example.yaml

-      # Install all the dependencies
-      - name: Install dependencies
-        run: |
-          go version
-          go install golang.org/x/lint/golint@latest
-          sudo apt update
-          sudo apt install -y make
+      - name: golangci-lint
+        if: steps.changed-files.outputs.any_changed == 'true'
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: latest

-      - name: Run lint
-        run: make lint
+          # Only block PRs on new problems.
+          # If this is not enabled, we will end up having PRs
+          # blocked because new linters has appared and other
+          # parts of the code is affected.
+          only-new-issues: true
+
+  prettier-lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          fetch-depth: 2
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@v14.1
+        with:
+          files: |
+            **/*.md
+            **/*.yml
+            **/*.yaml
+            **/*.ts
+            **/*.js
+            **/*.sass
+            **/*.css
+            **/*.scss
+            **/*.html
+
+      - name: Prettify code
+        if: steps.changed-files.outputs.any_changed == 'true'
+        uses: creyD/prettier_action@v4.0
+        with:
+          prettier_options: >-
+            --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
+          only_changed: false
+          dry: true
+
+  proto-lint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: bufbuild/buf-setup-action@v0.7.0
+      - uses: bufbuild/buf-lint-action@v1
+        with:
+          input: "proto"
.github/workflows/release.yml (185 lines changed)

@@ -4,25 +4,27 @@ name: release
 on:
   push:
     tags:
-      - "*" # triggers only if push new tag version
+      - "*" # triggers only if push new tag version
   workflow_dispatch:

 jobs:
   goreleaser:
-    runs-on: ubuntu-18.04 # due to CGO we need to user an older version
+    runs-on: ubuntu-18.04 # due to CGO we need to user an older version
     steps:
-      -
-        name: Checkout
+      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
-      -
-        name: Set up Go
+      - name: Set up Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.16
-      -
-        name: Run GoReleaser
+          go-version: 1.17.7
+
+      - name: Install dependencies
+        run: |
+          sudo apt update
+          sudo apt install -y gcc-aarch64-linux-gnu
+      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          distribution: goreleaser

@@ -34,13 +36,24 @@ jobs:
   docker-release:
     runs-on: ubuntu-latest
     steps:
-      -
-        name: Checkout
+      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
-      -
-        name: Docker meta
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Set up QEMU for multiple platforms
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: arm64,amd64
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:

@@ -49,24 +62,23 @@ jobs:
           ${{ secrets.DOCKERHUB_USERNAME }}/headscale
           ghcr.io/${{ github.repository_owner }}/headscale
         tags: |
           type=semver,pattern={{version}}
           type=semver,pattern={{major}}.{{minor}}
           type=semver,pattern={{major}}
           type=raw,value=latest
           type=sha
-      -
-        name: Login to DockerHub
+      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      -
-        name: Login to GHCR
+      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
-      -
-        name: Build and push
+      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:

@@ -74,3 +86,138 @@ jobs:
           context: .
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache-new
+      - name: Prepare cache for next build
+        run: |
+          rm -rf /tmp/.buildx-cache
+          mv /tmp/.buildx-cache-new /tmp/.buildx-cache
+
+  docker-debug-release:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Set up QEMU for multiple platforms
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: arm64,amd64
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache-debug
+          key: ${{ runner.os }}-buildx-debug-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-debug-
+      - name: Docker meta
+        id: meta-debug
+        uses: docker/metadata-action@v3
+        with:
+          # list of Docker images to use as base name for tags
+          images: |
+            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
+            ghcr.io/${{ github.repository_owner }}/headscale
+          flavor: |
+            latest=false
+          tags: |
+            type=semver,pattern={{version}}-debug
+            type=semver,pattern={{major}}.{{minor}}-debug
+            type=semver,pattern={{major}}-debug
+            type=raw,value=latest-debug
+            type=sha,suffix=-debug
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Login to GHCR
+        uses: docker/login-action@v1
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push
+        id: docker_build
+        uses: docker/build-push-action@v2
+        with:
+          push: true
+          context: .
+          file: Dockerfile.debug
+          tags: ${{ steps.meta-debug.outputs.tags }}
+          labels: ${{ steps.meta-debug.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          cache-from: type=local,src=/tmp/.buildx-cache-debug
+          cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
+      - name: Prepare cache for next build
+        run: |
+          rm -rf /tmp/.buildx-cache-debug
+          mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug
+
+  docker-alpine-release:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Set up QEMU for multiple platforms
+        uses: docker/setup-qemu-action@master
+        with:
+          platforms: arm64,amd64
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache-alpine
+          key: ${{ runner.os }}-buildx-alpine-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-alpine-
+      - name: Docker meta
+        id: meta-alpine
+        uses: docker/metadata-action@v3
+        with:
+          # list of Docker images to use as base name for tags
+          images: |
+            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
+            ghcr.io/${{ github.repository_owner }}/headscale
+          flavor: |
+            latest=false
+          tags: |
+            type=semver,pattern={{version}}-alpine
+            type=semver,pattern={{major}}.{{minor}}-alpine
+            type=semver,pattern={{major}}-alpine
+            type=raw,value=latest-alpine
+            type=sha,suffix=-alpine
+      - name: Login to DockerHub
+        uses: docker/login-action@v1
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Login to GHCR
+        uses: docker/login-action@v1
+        with:
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push
+        id: docker_build
+        uses: docker/build-push-action@v2
+        with:
+          push: true
+          context: .
+          file: Dockerfile.alpine
+          tags: ${{ steps.meta-alpine.outputs.tags }}
+          labels: ${{ steps.meta-alpine.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          cache-from: type=local,src=/tmp/.buildx-cache-alpine
+          cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
+      - name: Prepare cache for next build
+        run: |
+          rm -rf /tmp/.buildx-cache-alpine
+          mv /tmp/.buildx-cache-alpine-new /tmp/.buildx-cache-alpine
.github/workflows/test-integration.yml (21 lines changed)

@@ -3,21 +3,30 @@ name: CI
 on: [pull_request]

 jobs:
-  # The "build" workflow
   integration-test:
-    # The type of runner that the job will run on
     runs-on: ubuntu-latest

-    # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
+        with:
+          fetch-depth: 2
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@v14.1
+        with:
+          files: |
+            go.*
+            **/*.go
+            integration_test/
+            config-example.yaml

-      # Setup Go
       - name: Setup Go
+        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/setup-go@v2
        with:
-          go-version: "1.16.3"
+          go-version: "1.17.7"

       - name: Run Integration tests
+        if: steps.changed-files.outputs.any_changed == 'true'
        run: go test -tags integration -timeout 30m
.github/workflows/test.yml (24 lines changed)

@@ -3,31 +3,41 @@ name: CI
 on: [push, pull_request]

 jobs:
-  # The "build" workflow
   test:
-    # The type of runner that the job will run on
     runs-on: ubuntu-latest

-    # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
-      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
       - uses: actions/checkout@v2
+        with:
+          fetch-depth: 2
+
+      - name: Get changed files
+        id: changed-files
+        uses: tj-actions/changed-files@v14.1
+        with:
+          files: |
+            go.*
+            **/*.go
+            integration_test/
+            config-example.yaml

-      # Setup Go
       - name: Setup Go
+        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/setup-go@v2
        with:
-          go-version: "1.16.3" # The Go version to download (if necessary) and use.
+          go-version: "1.17.7"

-      # Install all the dependencies
       - name: Install dependencies
+        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          go version
          sudo apt update
          sudo apt install -y make

       - name: Run tests
+        if: steps.changed-files.outputs.any_changed == 'true'
        run: make test

       - name: Run build
+        if: steps.changed-files.outputs.any_changed == 'true'
        run: make
.gitignore (3 lines changed)

@@ -16,6 +16,9 @@

 /headscale
 config.json
 config.yaml
 derp.yaml
+*.hujson
+*.key
 /db.sqlite
+*.sqlite3
.golangci.yaml (new file, 56 lines)

---
run:
  timeout: 10m

issues:
  skip-dirs:
    - gen
linters:
  enable-all: true
  disable:
    - exhaustivestruct
    - revive
    - lll
    - interfacer
    - scopelint
    - maligned
    - golint
    - gofmt
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - funlen
    - exhaustivestruct
    - tagliatelle
    - godox
    - ireturn

    # We should strive to enable these:
    - wrapcheck
    - dupl
    - makezero

    # We might want to enable this, but it might be a lot of work
    - cyclop
    - nestif
    - wsl # might be incompatible with gofumpt
    - testpackage
    - paralleltest

linters-settings:
  varnamelen:
    ignore-type-assert-ok: true
    ignore-map-index-ok: true
    ignore-names:
      - err
      - db
      - id
      - ip
      - ok
      - c

  gocritic:
    disabled-checks:
      - appendAssign
      # TODO(kradalby): Remove this
      - ifElseChain
.goreleaser.yml (changed)

@@ -1,12 +1,15 @@
 # This is an example .goreleaser.yml file with some sane defaults.
 # Make sure to check the documentation at http://goreleaser.com
+---
 before:
   hooks:
-    - go mod tidy
+    - go mod tidy -compat=1.17
+
+release:
+  prerelease: auto

 builds:
   - id: darwin-amd64
     main: ./cmd/headscale/headscale.go
-    mod_timestamp: '{{ .CommitTimestamp }}'
+    mod_timestamp: "{{ .CommitTimestamp }}"
     goos:
       - darwin
     goarch:

@@ -19,16 +22,17 @@ builds:
     flags:
       - -mod=readonly
     ldflags:
-      - -s -w -X main.version={{.Version}}
+      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

   - id: linux-armhf
     main: ./cmd/headscale/headscale.go
-    mod_timestamp: '{{ .CommitTimestamp }}'
+    mod_timestamp: "{{ .CommitTimestamp }}"
     goos:
       - linux
     goarch:
       - arm
     goarm:
-      - 7
+      - "7"
     env:
       - CC=arm-linux-gnueabihf-gcc
       - CXX=arm-linux-gnueabihf-g++

@@ -39,8 +43,7 @@ builds:
     flags:
       - -mod=readonly
     ldflags:
-      - -s -w -X main.version={{.Version}}
-
+      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

   - id: linux-amd64
     env:

@@ -49,11 +52,23 @@ builds:
       - linux
     goarch:
       - amd64
-    goarm:
-      - 6
-      - 7
     main: ./cmd/headscale/headscale.go
-    mod_timestamp: '{{ .CommitTimestamp }}'
+    mod_timestamp: "{{ .CommitTimestamp }}"
     ldflags:
       - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

+  - id: linux-arm64
+    goos:
+      - linux
+    goarch:
+      - arm64
+    env:
+      - CGO_ENABLED=1
+      - CC=aarch64-linux-gnu-gcc
+    main: ./cmd/headscale/headscale.go
+    mod_timestamp: "{{ .CommitTimestamp }}"
+    ldflags:
+      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

 archives:
   - id: golang-cross

@@ -61,16 +76,17 @@ archives:
     - darwin-amd64
     - linux-armhf
     - linux-amd64
+    - linux-arm64
   name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
   format: binary

 checksum:
-  name_template: 'checksums.txt'
+  name_template: "checksums.txt"
 snapshot:
   name_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
     exclude:
-      - '^docs:'
-      - '^test:'
+      - "^docs:"
+      - "^test:"
CHANGELOG.md (new file, 74 lines)

# CHANGELOG

**TBD (TBD):**

**0.13.0 (2022-xx-xx):**

**Features**:

- Add IPv6 support to the prefix assigned to namespaces
- Add API Key support
- Enable remote control of `headscale` via CLI [docs](docs/remote-cli.md)
- Enable HTTP API (beta, subject to change)

**Changes**:

- `ip_prefix` is now superseded by `ip_prefixes` in the configuration [#208](https://github.com/juanfont/headscale/pull/208)
- Upgrade `tailscale` (1.20.4) and other dependencies to latest [#314](https://github.com/juanfont/headscale/pull/314)
- fix swapped machine<->namespace labels in `/metrics` [#312](https://github.com/juanfont/headscale/pull/312)
- remove key-value based update mechanism for namespace changes [#316](https://github.com/juanfont/headscale/pull/316)

**0.12.4 (2022-01-29):**

**Changes**:

- Make gRPC Unix Socket permissions configurable [#292](https://github.com/juanfont/headscale/pull/292)
- Trim whitespace before reading Private Key from file [#289](https://github.com/juanfont/headscale/pull/289)
- Add new command to generate a private key for `headscale` [#290](https://github.com/juanfont/headscale/pull/290)
- Fixed issue where hosts deleted from control server may be written back to the database, as long as they are connected to the control server [#278](https://github.com/juanfont/headscale/pull/278)

**0.12.3 (2022-01-13):**

**Changes**:

- Added Alpine container [#270](https://github.com/juanfont/headscale/pull/270)
- Minor updates in dependencies [#271](https://github.com/juanfont/headscale/pull/271)

**0.12.2 (2022-01-11):**

Happy New Year!

**Changes**:

- Fix Docker release [#258](https://github.com/juanfont/headscale/pull/258)
- Rewrite main docs [#262](https://github.com/juanfont/headscale/pull/262)
- Improve Docker docs [#263](https://github.com/juanfont/headscale/pull/263)

**0.12.1 (2021-12-24):**

(We are skipping 0.12.0 to correct a mishap done weeks ago with the version tagging)

**BREAKING**:

- Upgrade to Tailscale 1.18 [#229](https://github.com/juanfont/headscale/pull/229)
  - This change requires a new format for private key, private keys are now generated automatically:
    1. Delete your current key
    2. Restart `headscale`, a new key will be generated.
    3. Restart all Tailscale clients to fetch the new key

**Changes**:

- Unify configuration example [#197](https://github.com/juanfont/headscale/pull/197)
- Add stricter linting and formatting [#223](https://github.com/juanfont/headscale/pull/223)

**Features**:

- Add gRPC and HTTP API (HTTP API is currently disabled) [#204](https://github.com/juanfont/headscale/pull/204)
- Use gRPC between the CLI and the server [#206](https://github.com/juanfont/headscale/pull/206), [#212](https://github.com/juanfont/headscale/pull/212)
- Beta OpenID Connect support [#126](https://github.com/juanfont/headscale/pull/126), [#227](https://github.com/juanfont/headscale/pull/227)

**0.11.0 (2021-10-25):**

**BREAKING**:

- Make headscale fetch DERP map from URL and file [#196](https://github.com/juanfont/headscale/pull/196)
Dockerfile (13 lines changed)

@@ -1,18 +1,21 @@
-FROM golang:latest AS build
+# Builder image
+FROM docker.io/golang:1.17.7-bullseye AS build
 ENV GOPATH /go
+WORKDIR /go/src/headscale

 COPY go.mod go.sum /go/src/headscale/
-WORKDIR /go/src/headscale
 RUN go mod download

-COPY . /go/src/headscale
+COPY . .

 RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
 RUN strip /go/bin/headscale
 RUN test -e /go/bin/headscale

-FROM ubuntu:20.04
+# Production image
+FROM gcr.io/distroless/base-debian11

-COPY --from=build /go/bin/headscale /usr/local/bin/headscale
+COPY --from=build /go/bin/headscale /bin/headscale
 ENV TZ UTC

 EXPOSE 8080/tcp
Dockerfile.alpine (new file, 23 lines)

# Builder image
FROM docker.io/golang:1.17.7-alpine AS build
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN apk add gcc musl-dev
RUN go mod download

COPY . .

RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

# Production image
FROM docker.io/alpine:latest

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
CMD ["headscale"]
Dockerfile.debug (new file, 23 lines)

# Builder image
FROM docker.io/golang:1.17.7-bullseye AS build
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN go mod download

COPY . .

RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN test -e /go/bin/headscale

# Debug image
FROM gcr.io/distroless/base-debian11:debug

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

# Need to reset the entrypoint or everything will run as a busybox script
ENTRYPOINT []
EXPOSE 8080/tcp
CMD ["headscale"]
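The `ENTRYPOINT []` reset above matters because the distroless `:debug` base ships a busybox shell; with the entrypoint cleared, the shell can be invoked explicitly for troubleshooting. A minimal sketch of how the debug image could be used locally — the tag `headscale:debug` is only an illustration, not something defined by this diff:

```shell
# Build the debug variant from the Dockerfile.debug added above (example tag)
docker build -f Dockerfile.debug -t headscale:debug .

# Drop into the busybox shell provided by the distroless :debug base
docker run --rm -it headscale:debug /busybox/sh

# Or run the binary directly, as the image CMD would
docker run --rm headscale:debug headscale version
```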
(a further Dockerfile, not named in the compare view)

@@ -1,9 +1,11 @@
 FROM ubuntu:latest

+ARG TAILSCALE_VERSION
+
 RUN apt-get update \
     && apt-get install -y gnupg curl \
     && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
     && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
     && apt-get update \
-    && apt-get install -y tailscale \
+    && apt-get install -y tailscale=${TAILSCALE_VERSION} dnsutils \
     && rm -rf /var/lib/apt/lists/*
Makefile (36 lines changed)

@@ -1,8 +1,16 @@
 # Calculate version
 version = $(shell ./scripts/version-at-commit.sh)

+rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
+
+# GO_SOURCES = $(wildcard *.go)
+# PROTO_SOURCES = $(wildcard **/*.proto)
+GO_SOURCES = $(call rwildcard,,*.go)
+PROTO_SOURCES = $(call rwildcard,,*.proto)
+

 build:
-	go build -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.version=$(version)" cmd/headscale/headscale.go
+	go build -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go

 dev: lint test build

@@ -10,7 +18,10 @@ test:
 	@go test -coverprofile=coverage.out ./...

 test_integration:
-	go test -tags integration -timeout 30m ./...
+	go test -tags integration -timeout 30m -count=1 ./...
+
+test_integration_cli:
+	go test -tags integration -v integration_cli_test.go integration_common_test.go

 coverprofile_func:
 	go tool cover -func=coverage.out

@@ -19,9 +30,26 @@ coverprofile_html:
 	go tool cover -html=coverage.out

 lint:
-	golint
-	golangci-lint run --timeout 5m
+	golangci-lint run --fix --timeout 10m
+
+fmt:
+	prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
+	golines --max-len=88 --base-formatter=gofumpt -w $(GO_SOURCES)
+	clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i $(PROTO_SOURCES)
+
+proto-lint:
+	cd proto/ && buf lint

 compress: build
 	upx --brute headscale

+generate:
+	rm -rf gen
+	buf generate proto
+
+install-protobuf-plugins:
+	go install \
+		github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway \
+		github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 \
+		google.golang.org/protobuf/cmd/protoc-gen-go \
+		google.golang.org/grpc/cmd/protoc-gen-go-grpc
README.md (445 lines changed)

@@ -1,9 +1,13 @@
-# Headscale
+# headscale

-[](https://gitter.im/headscale-dev/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) (badge)
+(badge)

 An open source, self-hosted implementation of the Tailscale coordination server.

+Join our [Discord](https://discord.gg/XcQxk2VHjx) server for a chat.
+
+**Note:** Always select the same GitHub tag as the released version you use to ensure you have the correct example configuration and documentation. The `main` branch might contain unreleased changes.
+
 ## Overview

 Tailscale is [a modern VPN](https://tailscale.com/) built on top of [Wireguard](https://www.wireguard.com/). It [works like an overlay network](https://tailscale.com/blog/how-tailscale-works/) between the computers of your networks - using all kinds of [NAT traversal sorcery](https://tailscale.com/blog/how-nat-traversal-works/).

@@ -12,218 +16,271 @@ Everything in Tailscale is Open Source, except the GUI clients for proprietary O
 The control server works as an exchange point of Wireguard public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.

-Headscale implements this coordination server.
+headscale implements this coordination server.
+
+## Support
+
+If you like `headscale` and find it useful, there is sponsorship and donation buttons available in the repo.
+
+If you would like to sponsor features, bugs or prioritisation, reach out to one of the maintainers.

 ## Status

 - [x] Base functionality (nodes can communicate with each other)
 - [x] Node registration through the web flow
-- [x] Network changes are relied to the nodes
-- [x] Namespace support (~equivalent to multi-user in Tailscale.com)
+- [x] Network changes are relayed to the nodes
+- [x] Namespaces support (~tailnets in Tailscale.com naming)
 - [x] Routing (advertise & accept, including exit nodes)
 - [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
-- [X] JSON-formatted output
-- [X] ACLs
-- [X] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
-- [X] DNS (passing DNS servers to nodes)
-- [X] Share nodes between ~~users~~ namespaces
-- [ ] MagicDNS / Smart DNS
+- [x] JSON-formatted output
+- [x] ACLs
+- [x] Taildrop (File Sharing)
+- [x] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
+- [x] DNS (passing DNS servers to nodes)
+- [x] Single-Sign-On (via Open ID Connect)
+- [x] Share nodes between namespaces
+- [x] MagicDNS (see `docs/`)
+
+## Client OS support
+
+| OS      | Supports headscale                                                                                                 |
+| ------- | ------------------------------------------------------------------------------------------------------------------ |
+| Linux   | Yes                                                                                                                |
+| OpenBSD | Yes                                                                                                                |
+| macOS   | Yes (see `/apple` on your headscale for more information)                                                          |
+| Windows | Yes [docs](./docs/windows-client.md)                                                                               |
+| Android | [You need to compile the client yourself](https://github.com/juanfont/headscale/issues/58#issuecomment-885255270)  |
+| iOS     | Not yet                                                                                                            |

 ## Roadmap 🤷

 Suggestions/PRs welcomed!

+## Running headscale
+
-## Running it
-
-1. Download the Headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH or use the docker container
-
-```shell
-docker pull headscale/headscale:x.x.x
-```
-
-or
-
-```shell
-docker pull ghrc.io/juanfont/headscale:x.x.x
-```
-
-2. (Optional, you can also use SQLite) Get yourself a PostgreSQL DB running
-
-```shell
-docker run --name headscale -e POSTGRES_DB=headscale -e \
-POSTGRES_USER=foo -e POSTGRES_PASSWORD=bar -p 5432:5432 -d postgres
-```
-
-3. Set some stuff up (headscale Wireguard keys & the config.json file)
-
-```shell
-wg genkey > private.key
-wg pubkey < private.key > public.key # not needed
-
-# Postgres
-cp config.json.postgres.example config.json
-# or
-# SQLite
-cp config.json.sqlite.example config.json
-```
-
-4. Create a namespace (a namespace is a 'tailnet', a group of Tailscale nodes that can talk to each other)
-
-```shell
-headscale namespaces create myfirstnamespace
-```
-
-or docker:
-
-```shell
-docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale namespace create myfirstnamespace
-```
-
-5. Run the server
-
-```shell
-headscale serve
-```
-
-or docker:
-
-```shell
-docker run -v $(pwd)/private.key:/private.key -v $(pwd)/config.json:/config.json -v $(pwd)/derb.yaml:/derb.yaml -p 127.0.0.1:8080:8080 headscale/headscale:x.x.x headscale serve
-```
-
-6. If you used tailscale.com before in your nodes, make sure you clear the tailscaled data folder
-
-```shell
-systemctl stop tailscaled
-rm -fr /var/lib/tailscale
-systemctl start tailscaled
-```
-
-7. Add your first machine
-
-```shell
-tailscale up -login-server YOUR_HEADSCALE_URL
-```
-
-8. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.
-
-9. In the server, register your machine to a namespace with the CLI
-
-```shell
-headscale -n myfirstnamespace node register YOURMACHINEKEY
-```
-
-or docker:
-
-```shell
-docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace node register YOURMACHINEKEY
-```
-
-Alternatively, you can use Auth Keys to register your machines:
-
-1. Create an authkey
-
-```shell
-headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
-```
-
-or docker:
-
-```shell
-docker run -v ./private.key:/private.key -v ./config.json:/config.json headscale/headscale:x.x.x headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
-```
-
-2. Use the authkey from your machine to register it
-
-```shell
-tailscale up -login-server YOUR_HEADSCALE_URL --authkey YOURAUTHKEY
-```
-
-If you create an authkey with the `--ephemeral` flag, that key will create ephemeral nodes. This implies that `--reusable` is true.
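As an illustration of the `--ephemeral` flag described in the (removed) paragraph above, an ephemeral pre-auth key could be created the same way as the reusable key shown earlier; this is a sketch, not part of the original instructions:

```shell
# Nodes registered with this key are ephemeral and are cleaned up after the
# ephemeral_node_inactivity_timeout once they go offline
headscale -n myfirstnamespace preauthkeys create --ephemeral --expiration 24h
```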
-
-Please bear in mind that all the commands from headscale support adding `-o json` or `-o json-line` to get a nicely JSON-formatted output.
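A short sketch of the JSON output option mentioned above, reusing a command that already appears in these instructions:

```shell
# Same pre-auth key command as above, but with machine-readable output
headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h -o json
```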
|
||||
|
||||

## Configuration reference

Headscale's configuration file is named `config.json` or `config.yaml`. Headscale will look for it in `/etc/headscale`, `~/.headscale` and finally the directory from where the Headscale binary is executed.

```
"server_url": "http://192.168.1.12:8080",
"listen_addr": "0.0.0.0:8080",
"ip_prefix": "100.64.0.0/10"
```

`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on. `ip_prefix` is the IP prefix (range) in which IP addresses for nodes will be allocated (default `100.64.0.0/10`; e.g., `192.168.4.0/24`, `10.0.0.0/8`).

```
"log_level": "debug"
```

`log_level` can be used to set the log level for Headscale. It defaults to `debug`, and the available levels are: `trace`, `debug`, `info`, `warn` and `error`.

```
"private_key_path": "private.key",
```

`private_key_path` is the path to the Wireguard private key. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```
"derp_map_path": "derp.yaml",
```

`derp_map_path` is the path to the [DERP](https://pkg.go.dev/tailscale.com/derp) map file. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```
"ephemeral_node_inactivity_timeout": "30m",
```

`ephemeral_node_inactivity_timeout` is the timeout after which inactive ephemeral node records will be deleted from the database. The default is 30 minutes. This value must be higher than 65 seconds (the keepalive timeout for the HTTP long poll is 60 seconds, plus a few seconds to avoid race conditions).

```
"db_host": "localhost",
"db_port": 5432,
"db_name": "headscale",
"db_user": "foo",
"db_pass": "bar",
```

The fields starting with `db_` are used for the PostgreSQL connection information.
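
Putting the options above together, a minimal `config.json` might look like the following sketch (the values are purely illustrative; leave out the `db_*` fields if you are not using PostgreSQL):

```
{
    "server_url": "http://192.168.1.12:8080",
    "listen_addr": "0.0.0.0:8080",
    "ip_prefix": "100.64.0.0/10",
    "log_level": "info",
    "private_key_path": "private.key",
    "derp_map_path": "derp.yaml",
    "ephemeral_node_inactivity_timeout": "30m",
    "db_host": "localhost",
    "db_port": 5432,
    "db_name": "headscale",
    "db_user": "foo",
    "db_pass": "bar"
}
```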

### Running the service via TLS (optional)

```
"tls_cert_path": ""
"tls_key_path": ""
```

Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```
"tls_letsencrypt_hostname": "",
"tls_letsencrypt_listen": ":http",
"tls_letsencrypt_cache_dir": ".cache",
"tls_letsencrypt_challenge_type": "HTTP-01",
```

To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) Headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.
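
For example, if `server_url` is `https://headscale.example.com` (a hypothetical name used only for illustration), the corresponding setting would be:

```
"tls_letsencrypt_hostname": "headscale.example.com",
```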

#### Challenge type HTTP-01

The default challenge type `HTTP-01` requires that Headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, Headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.

If you need to change the IP and/or port used by Headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running Headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.
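
As an illustration (not part of Headscale itself), if `tls_letsencrypt_listen` were set to `:8080`, an iptables rule along these lines could redirect the incoming validation traffic from port 80 to that listener:

```shell
iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8080
```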

#### Challenge type TLS-ALPN-01

Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, Headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.
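
A sketch of the relevant settings, assuming Headscale itself is allowed to bind port 443 and reusing the hypothetical hostname from above:

```
"listen_addr": "0.0.0.0:443",
"tls_letsencrypt_hostname": "headscale.example.com",
"tls_letsencrypt_challenge_type": "TLS-ALPN-01",
```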

### Policy ACLs

Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.

For instance, instead of referring to users when defining groups you must use namespaces (which are the equivalent of users/logins in Tailscale.com).

Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.
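
As a purely illustrative sketch (not one of the files under `./tests/acls/`), a minimal policy that uses a namespace in place of a user might look like this, assuming the `myfirstnamespace` namespace from the walkthrough above:

```
{
  "Hosts": {
    "example-host-1": "100.64.0.1"
  },
  "ACLs": [
    {
      "Action": "accept",
      "Users": ["myfirstnamespace"],
      "Ports": ["example-host-1:22,80,443"]
    }
  ]
}
```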

Please have a look at the documentation under [`docs/`](docs/).

## Disclaimer

1. We have nothing to do with Tailscale, or Tailscale Inc.
2. The purpose of writing this was to learn how Tailscale works.
3. The purpose of Headscale is maintaining a working, self-hosted Tailscale control panel.

## More on Tailscale

- https://tailscale.com/blog/how-tailscale-works/
- https://tailscale.com/blog/tailscale-key-management/
- https://tailscale.com/blog/an-unlikely-database-migration/

## Contributing

To contribute to Headscale you will need the latest version of [Go](https://golang.org) and [Buf](https://buf.build) (Protobuf generator).

### Code style

To ensure we have some consistency with a growing number of contributions, this project has adopted linting and style/formatting rules:

The **Go** code is linted with [`golangci-lint`](https://golangci-lint.run) and formatted with [`golines`](https://github.com/segmentio/golines) (width 88) and [`gofumpt`](https://github.com/mvdan/gofumpt).
Please configure your editor to run the tools while developing and make sure to run `make lint` and `make fmt` before committing any code.

The **Proto** code is linted with [`buf`](https://docs.buf.build/lint/overview) and formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).

The **rest** (Markdown, YAML, etc.) is formatted with [`prettier`](https://prettier.io).

Check out the `.golangci.yaml` and `Makefile` to see the specific configuration.

### Install development tools

- Go
- Buf
- Protobuf tools:

  ```shell
  make install-protobuf-plugins
  ```

### Testing and building

Some parts of the project require Go code generated from Protobuf; if changes are made in `proto/`, it must be (re-)generated with:

```shell
make generate
```

**Note**: Please check in changes from `gen/` in a separate commit to make it easier to review.

To run the tests:

```shell
make test
```

To build the program:

```shell
make build
```

## Contributors

- [Juan Font](https://github.com/juanfont)
- [Kristoffer Dalby](https://github.com/kradalby)
- [Ward Vandewege](https://github.com/cure)
- [ohdearaugustin](https://github.com/ohdearaugustin)
- [unreality](https://github.com/unreality)
- [Aaron Bieber](https://github.com/qbit)
- [Paul Tötterman](https://github.com/ptman)
- [Casey Marshall](https://github.com/cmars)
- [Silver Bullet](https://github.com/SilverBut)
- [thomas](https://github.com/t56k)
- [Arthur Woimbée](https://github.com/awoimbee)
- [Felix Kronlage-Dammers](https://github.com/fkr)
- [Felix Yan](https://github.com/felixonmars)
- [Shaanan Cohney](https://github.com/shaananc)
- [Teteros](https://github.com/Teteros)
- [The Gitter Badger](https://github.com/gitter-badger)
- [Tianon Gravi](https://github.com/tianon)
- [Tjerk Woudsma](https://github.com/woudsma)
- [Zakhar Bessarab](https://github.com/zekker6)
- [derelm](https://github.com/derelm)
- [ignoramous](https://github.com/ignoramous)
- [zy](https://github.com/xpzouying)
200
acls.go
@@ -9,22 +9,39 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const errorEmptyPolicy = Error("empty policy")
|
||||
const errorInvalidAction = Error("invalid action")
|
||||
const errorInvalidUserSection = Error("invalid user section")
|
||||
const errorInvalidGroup = Error("invalid group")
|
||||
const errorInvalidTag = Error("invalid tag")
|
||||
const errorInvalidNamespace = Error("invalid namespace")
|
||||
const errorInvalidPortFormat = Error("invalid port format")
|
||||
const (
|
||||
errEmptyPolicy = Error("empty policy")
|
||||
errInvalidAction = Error("invalid action")
|
||||
errInvalidUserSection = Error("invalid user section")
|
||||
errInvalidGroup = Error("invalid group")
|
||||
errInvalidTag = Error("invalid tag")
|
||||
errInvalidNamespace = Error("invalid namespace")
|
||||
errInvalidPortFormat = Error("invalid port format")
|
||||
)
|
||||
|
||||
// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules
|
||||
const (
|
||||
Base8 = 8
|
||||
Base10 = 10
|
||||
BitSize16 = 16
|
||||
BitSize32 = 32
|
||||
BitSize64 = 64
|
||||
portRangeBegin = 0
|
||||
portRangeEnd = 65535
|
||||
expectedTokenItems = 2
|
||||
)
|
||||
|
||||
// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules.
|
||||
func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
log.Debug().
|
||||
Str("func", "LoadACLPolicy").
|
||||
Str("path", path).
|
||||
Msg("Loading ACL policy from path")
|
||||
|
||||
policyFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -32,16 +49,23 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
defer policyFile.Close()
|
||||
|
||||
var policy ACLPolicy
|
||||
b, err := io.ReadAll(policyFile)
|
||||
policyBytes, err := io.ReadAll(policyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = hujson.Unmarshal(b, &policy)
|
||||
|
||||
ast, err := hujson.Parse(policyBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ast.Standardize()
|
||||
policyBytes = ast.Pack()
|
||||
err = json.Unmarshal(policyBytes, &policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if policy.IsZero() {
|
||||
return errorEmptyPolicy
|
||||
return errEmptyPolicy
|
||||
}
|
||||
|
||||
h.aclPolicy = &policy
|
||||
@@ -50,40 +74,45 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
return err
|
||||
}
|
||||
h.aclRules = rules
|
||||
|
||||
log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLRules() (*[]tailcfg.FilterRule, error) {
|
||||
func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
rules := []tailcfg.FilterRule{}
|
||||
|
||||
for i, a := range h.aclPolicy.ACLs {
|
||||
if a.Action != "accept" {
|
||||
return nil, errorInvalidAction
|
||||
for index, acl := range h.aclPolicy.ACLs {
|
||||
if acl.Action != "accept" {
|
||||
return nil, errInvalidAction
|
||||
}
|
||||
|
||||
r := tailcfg.FilterRule{}
|
||||
filterRule := tailcfg.FilterRule{}
|
||||
|
||||
srcIPs := []string{}
|
||||
for j, u := range a.Users {
|
||||
srcs, err := h.generateACLPolicySrcIP(u)
|
||||
for innerIndex, user := range acl.Users {
|
||||
srcs, err := h.generateACLPolicySrcIP(user)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, User %d", i, j)
|
||||
Msgf("Error parsing ACL %d, User %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
srcIPs = append(srcIPs, *srcs...)
|
||||
srcIPs = append(srcIPs, srcs...)
|
||||
}
|
||||
r.SrcIPs = srcIPs
|
||||
filterRule.SrcIPs = srcIPs
|
||||
|
||||
destPorts := []tailcfg.NetPortRange{}
|
||||
for j, d := range a.Ports {
|
||||
dests, err := h.generateACLPolicyDestPorts(d)
|
||||
for innerIndex, ports := range acl.Ports {
|
||||
dests, err := h.generateACLPolicyDestPorts(ports)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, Port %d", i, j)
|
||||
Msgf("Error parsing ACL %d, Port %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
destPorts = append(destPorts, *dests...)
|
||||
destPorts = append(destPorts, dests...)
|
||||
}
|
||||
|
||||
rules = append(rules, tailcfg.FilterRule{
|
||||
@@ -92,17 +121,19 @@ func (h *Headscale) generateACLRules() (*[]tailcfg.FilterRule, error) {
|
||||
})
|
||||
}
|
||||
|
||||
return &rules, nil
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicySrcIP(u string) (*[]string, error) {
|
||||
func (h *Headscale) generateACLPolicySrcIP(u string) ([]string, error) {
|
||||
return h.expandAlias(u)
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRange, error) {
|
||||
func (h *Headscale) generateACLPolicyDestPorts(
|
||||
d string,
|
||||
) ([]tailcfg.NetPortRange, error) {
|
||||
tokens := strings.Split(d, ":")
|
||||
if len(tokens) < 2 || len(tokens) > 3 {
|
||||
return nil, errorInvalidPortFormat
|
||||
if len(tokens) < expectedTokenItems || len(tokens) > 3 {
|
||||
return nil, errInvalidPortFormat
|
||||
}
|
||||
|
||||
var alias string
|
||||
@@ -112,7 +143,7 @@ func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRang
|
||||
// tag:montreal-webserver:80,443
|
||||
// tag:api-server:443
|
||||
// example-host-1:*
|
||||
if len(tokens) == 2 {
|
||||
if len(tokens) == expectedTokenItems {
|
||||
alias = tokens[0]
|
||||
} else {
|
||||
alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
|
||||
@@ -128,7 +159,7 @@ func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRang
|
||||
}
|
||||
|
||||
dests := []tailcfg.NetPortRange{}
|
||||
for _, d := range *expanded {
|
||||
for _, d := range expanded {
|
||||
for _, p := range *ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: d,
|
||||
@@ -137,34 +168,36 @@ func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRang
|
||||
dests = append(dests, pr)
|
||||
}
|
||||
}
|
||||
return &dests, nil
|
||||
|
||||
return dests, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) expandAlias(s string) (*[]string, error) {
|
||||
if s == "*" {
|
||||
return &[]string{"*"}, nil
|
||||
func (h *Headscale) expandAlias(alias string) ([]string, error) {
|
||||
if alias == "*" {
|
||||
return []string{"*"}, nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "group:") {
|
||||
if _, ok := h.aclPolicy.Groups[s]; !ok {
|
||||
return nil, errorInvalidGroup
|
||||
if strings.HasPrefix(alias, "group:") {
|
||||
if _, ok := h.aclPolicy.Groups[alias]; !ok {
|
||||
return nil, errInvalidGroup
|
||||
}
|
||||
ips := []string{}
|
||||
for _, n := range h.aclPolicy.Groups[s] {
|
||||
for _, n := range h.aclPolicy.Groups[alias] {
|
||||
nodes, err := h.ListMachinesInNamespace(n)
|
||||
if err != nil {
|
||||
return nil, errorInvalidNamespace
|
||||
return nil, errInvalidNamespace
|
||||
}
|
||||
for _, node := range *nodes {
|
||||
ips = append(ips, node.IPAddress)
|
||||
for _, node := range nodes {
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
return &ips, nil
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "tag:") {
|
||||
if _, ok := h.aclPolicy.TagOwners[s]; !ok {
|
||||
return nil, errorInvalidTag
|
||||
if strings.HasPrefix(alias, "tag:") {
|
||||
if _, ok := h.aclPolicy.TagOwners[alias]; !ok {
|
||||
return nil, errInvalidTag
|
||||
}
|
||||
|
||||
// This will have HORRIBLE performance.
|
||||
@@ -174,10 +207,10 @@ func (h *Headscale) expandAlias(s string) (*[]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
ips := []string{}
|
||||
for _, m := range machines {
|
||||
for _, machine := range machines {
|
||||
hostinfo := tailcfg.Hostinfo{}
|
||||
if len(m.HostInfo) != 0 {
|
||||
hi, err := m.HostInfo.MarshalJSON()
|
||||
if len(machine.HostInfo) != 0 {
|
||||
hi, err := machine.HostInfo.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -188,69 +221,76 @@ func (h *Headscale) expandAlias(s string) (*[]string, error) {
|
||||
|
||||
// FIXME: Check TagOwners allows this
|
||||
for _, t := range hostinfo.RequestTags {
|
||||
if s[4:] == t {
|
||||
ips = append(ips, m.IPAddress)
|
||||
if alias[4:] == t {
|
||||
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &ips, nil
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
n, err := h.GetNamespace(s)
|
||||
n, err := h.GetNamespace(alias)
|
||||
if err == nil {
|
||||
nodes, err := h.ListMachinesInNamespace(n.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips := []string{}
|
||||
for _, n := range *nodes {
|
||||
ips = append(ips, n.IPAddress)
|
||||
for _, n := range nodes {
|
||||
ips = append(ips, n.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
return &ips, nil
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
if h, ok := h.aclPolicy.Hosts[s]; ok {
|
||||
return &[]string{h.String()}, nil
|
||||
if h, ok := h.aclPolicy.Hosts[alias]; ok {
|
||||
return []string{h.String()}, nil
|
||||
}
|
||||
|
||||
ip, err := netaddr.ParseIP(s)
|
||||
ip, err := netaddr.ParseIP(alias)
|
||||
if err == nil {
|
||||
return &[]string{ip.String()}, nil
|
||||
return []string{ip.String()}, nil
|
||||
}
|
||||
|
||||
cidr, err := netaddr.ParseIPPrefix(s)
|
||||
cidr, err := netaddr.ParseIPPrefix(alias)
|
||||
if err == nil {
|
||||
return &[]string{cidr.String()}, nil
|
||||
return []string{cidr.String()}, nil
|
||||
}
|
||||
|
||||
return nil, errorInvalidUserSection
|
||||
return nil, errInvalidUserSection
|
||||
}
|
||||
|
||||
func (h *Headscale) expandPorts(s string) (*[]tailcfg.PortRange, error) {
|
||||
if s == "*" {
|
||||
return &[]tailcfg.PortRange{{First: 0, Last: 65535}}, nil
|
||||
func (h *Headscale) expandPorts(portsStr string) (*[]tailcfg.PortRange, error) {
|
||||
if portsStr == "*" {
|
||||
return &[]tailcfg.PortRange{
|
||||
{First: portRangeBegin, Last: portRangeEnd},
|
||||
}, nil
|
||||
}
|
||||
|
||||
ports := []tailcfg.PortRange{}
|
||||
for _, p := range strings.Split(s, ",") {
|
||||
rang := strings.Split(p, "-")
|
||||
if len(rang) == 1 {
|
||||
pi, err := strconv.ParseUint(rang[0], 10, 16)
|
||||
for _, portStr := range strings.Split(portsStr, ",") {
|
||||
rang := strings.Split(portStr, "-")
|
||||
switch len(rang) {
|
||||
case 1:
|
||||
port, err := strconv.ParseUint(rang[0], Base10, BitSize16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ports = append(ports, tailcfg.PortRange{
|
||||
First: uint16(pi),
|
||||
Last: uint16(pi),
|
||||
First: uint16(port),
|
||||
Last: uint16(port),
|
||||
})
|
||||
} else if len(rang) == 2 {
|
||||
start, err := strconv.ParseUint(rang[0], 10, 16)
|
||||
|
||||
case expectedTokenItems:
|
||||
start, err := strconv.ParseUint(rang[0], Base10, BitSize16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
last, err := strconv.ParseUint(rang[1], 10, 16)
|
||||
last, err := strconv.ParseUint(rang[1], Base10, BitSize16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -258,9 +298,11 @@ func (h *Headscale) expandPorts(s string) (*[]tailcfg.PortRange, error) {
|
||||
First: uint16(start),
|
||||
Last: uint16(last),
|
||||
})
|
||||
} else {
|
||||
return nil, errorInvalidPortFormat
|
||||
|
||||
default:
|
||||
return nil, errInvalidPortFormat
|
||||
}
|
||||
}
|
||||
|
||||
return &ports, nil
|
||||
}
|
||||
|
||||
135
acls_test.go
@@ -5,156 +5,163 @@ import (
|
||||
)
|
||||
|
||||
func (s *Suite) TestWrongPath(c *check.C) {
|
||||
err := h.LoadACLPolicy("asdfg")
|
||||
err := app.LoadACLPolicy("asdfg")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestBrokenHuJson(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/broken.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/broken.hujson")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
}
|
||||
|
||||
func (s *Suite) TestInvalidPolicyHuson(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/invalid.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/invalid.hujson")
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(err, check.Equals, errorEmptyPolicy)
|
||||
c.Assert(err, check.Equals, errEmptyPolicy)
|
||||
}
|
||||
|
||||
func (s *Suite) TestParseHosts(c *check.C) {
|
||||
var hs Hosts
|
||||
err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100","example-host-2": "100.100.101.100/24"}`))
|
||||
c.Assert(hs, check.NotNil)
|
||||
var hosts Hosts
|
||||
err := hosts.UnmarshalJSON(
|
||||
[]byte(
|
||||
`{"example-host-1": "100.100.100.100","example-host-2": "100.100.101.100/24"}`,
|
||||
),
|
||||
)
|
||||
c.Assert(hosts, check.NotNil)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestParseInvalidCIDR(c *check.C) {
|
||||
var hs Hosts
|
||||
err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100/42"}`))
|
||||
c.Assert(hs, check.IsNil)
|
||||
var hosts Hosts
|
||||
err := hosts.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100/42"}`))
|
||||
c.Assert(hosts, check.IsNil)
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestRuleInvalidGeneration(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_invalid.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_invalid.hujson")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestBasicRule(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortRange(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(5400))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(5400))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortWildcard(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Equals, "*")
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert(rules[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Equals, "*")
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortNamespace(c *check.C) {
|
||||
n, err := h.CreateNamespace("testnamespace")
|
||||
namespace, err := app.CreateNamespace("testnamespace")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine("testnamespace", "testmachine")
|
||||
_, err = app.GetMachine("testnamespace", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
ip, _ := h.getAvailableIP()
|
||||
m := Machine{
|
||||
ips, _ := app.getAvailableIPs()
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
NamespaceID: namespace.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: ip.String(),
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: ips,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
app.db.Save(&machine)
|
||||
|
||||
err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_namespace_as_user.hujson")
|
||||
err = app.LoadACLPolicy(
|
||||
"./tests/acls/acl_policy_basic_namespace_as_user.hujson",
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert(rules[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert(len(ips), check.Equals, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Equals, ips[0].String())
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortGroup(c *check.C) {
|
||||
n, err := h.CreateNamespace("testnamespace")
|
||||
namespace, err := app.CreateNamespace("testnamespace")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine("testnamespace", "testmachine")
|
||||
_, err = app.GetMachine("testnamespace", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
ip, _ := h.getAvailableIP()
|
||||
m := Machine{
|
||||
ips, _ := app.getAvailableIPs()
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
NamespaceID: namespace.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: ip.String(),
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: ips,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
app.db.Save(&machine)
|
||||
|
||||
err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
|
||||
err = app.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert(rules[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert(len(ips), check.Equals, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Equals, ips[0].String())
|
||||
}
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
// ACLPolicy represents a Tailscale ACL Policy
|
||||
// ACLPolicy represents a Tailscale ACL Policy.
|
||||
type ACLPolicy struct {
|
||||
Groups Groups `json:"Groups"`
|
||||
Hosts Hosts `json:"Hosts"`
|
||||
@@ -16,55 +17,63 @@ type ACLPolicy struct {
|
||||
Tests []ACLTest `json:"Tests"`
|
||||
}
|
||||
|
||||
// ACL is a basic rule for the ACL Policy
|
||||
// ACL is a basic rule for the ACL Policy.
|
||||
type ACL struct {
|
||||
Action string `json:"Action"`
|
||||
Users []string `json:"Users"`
|
||||
Ports []string `json:"Ports"`
|
||||
}
|
||||
|
||||
// Groups references a series of alias in the ACL rules
|
||||
// Groups references a series of alias in the ACL rules.
|
||||
type Groups map[string][]string
|
||||
|
||||
// Hosts are alias for IP addresses or subnets
|
||||
// Hosts are alias for IP addresses or subnets.
|
||||
type Hosts map[string]netaddr.IPPrefix
|
||||
|
||||
// TagOwners specify what users (namespaces?) are allow to use certain tags
|
||||
// TagOwners specify what users (namespaces?) are allow to use certain tags.
|
||||
type TagOwners map[string][]string
|
||||
|
||||
// ACLTest is not implemented, but should be use to check if a certain rule is allowed
|
||||
// ACLTest is not implemented, but should be use to check if a certain rule is allowed.
|
||||
type ACLTest struct {
|
||||
User string `json:"User"`
|
||||
Allow []string `json:"Allow"`
|
||||
Deny []string `json:"Deny,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects
|
||||
func (h *Hosts) UnmarshalJSON(data []byte) error {
|
||||
hosts := Hosts{}
|
||||
hs := make(map[string]string)
|
||||
err := hujson.Unmarshal(data, &hs)
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects.
|
||||
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
ast, err := hujson.Parse(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range hs {
|
||||
if !strings.Contains(v, "/") {
|
||||
v = v + "/32"
|
||||
ast.Standardize()
|
||||
data = ast.Pack()
|
||||
err = json.Unmarshal(data, &hostIPPrefixMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for host, prefixStr := range hostIPPrefixMap {
|
||||
if !strings.Contains(prefixStr, "/") {
|
||||
prefixStr += "/32"
|
||||
}
|
||||
prefix, err := netaddr.ParseIPPrefix(v)
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hosts[k] = prefix
|
||||
newHosts[host] = prefix
|
||||
}
|
||||
*h = hosts
|
||||
*hosts = newHosts
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsZero is perhaps a bit naive here
|
||||
func (p ACLPolicy) IsZero() bool {
|
||||
if len(p.Groups) == 0 && len(p.Hosts) == 0 && len(p.ACLs) == 0 {
|
||||
// IsZero is perhaps a bit naive here.
|
||||
func (policy ACLPolicy) IsZero() bool {
|
||||
if len(policy.Groups) == 0 && len(policy.Hosts) == 0 && len(policy.ACLs) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
663
api.go
@@ -1,40 +1,51 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"html/template"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
const (
|
||||
reservedResponseHeaderSize = 4
|
||||
RegisterMethodAuthKey = "authKey"
|
||||
RegisterMethodOIDC = "oidc"
|
||||
RegisterMethodCLI = "cli"
|
||||
ErrRegisterMethodCLIDoesNotSupportExpire = Error(
|
||||
"machines registered with CLI does not support expire",
|
||||
)
|
||||
)
|
||||
|
||||
// KeyHandler provides the Headscale pub key
|
||||
// Listens in /key
|
||||
func (h *Headscale) KeyHandler(c *gin.Context) {
|
||||
c.Data(200, "text/plain; charset=utf-8", []byte(h.publicKey.HexString()))
|
||||
// Listens in /key.
|
||||
func (h *Headscale) KeyHandler(ctx *gin.Context) {
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"text/plain; charset=utf-8",
|
||||
[]byte(MachinePublicKeyStripPrefix(h.privateKey.Public())),
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterWebAPI shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register
|
||||
func (h *Headscale) RegisterWebAPI(c *gin.Context) {
|
||||
mKeyStr := c.Query("key")
|
||||
if mKeyStr == "" {
|
||||
c.String(http.StatusBadRequest, "Wrong params")
|
||||
return
|
||||
}
|
||||
type registerWebAPITemplateConfig struct {
|
||||
Key string
|
||||
}
|
||||
|
||||
c.Data(http.StatusOK, "text/html; charset=utf-8", []byte(fmt.Sprintf(`
|
||||
<html>
|
||||
var registerWebAPITemplate = template.Must(
|
||||
template.New("registerweb").Parse(`<html>
|
||||
<body>
|
||||
<h1>headscale</h1>
|
||||
<p>
|
||||
@@ -43,223 +54,209 @@ func (h *Headscale) RegisterWebAPI(c *gin.Context) {
|
||||
|
||||
<p>
|
||||
<code>
|
||||
<b>headscale -n NAMESPACE nodes register %s</b>
|
||||
<b>headscale -n NAMESPACE nodes register --key {{.Key}}</b>
|
||||
</code>
|
||||
</p>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
</html>`),
|
||||
)
|
||||
|
||||
`, mKeyStr)))
|
||||
// RegisterWebAPI shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) RegisterWebAPI(ctx *gin.Context) {
|
||||
machineKeyStr := ctx.Query("key")
|
||||
if machineKeyStr == "" {
|
||||
ctx.String(http.StatusBadRequest, "Wrong params")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
var content bytes.Buffer
|
||||
if err := registerWebAPITemplate.Execute(&content, registerWebAPITemplateConfig{
|
||||
Key: machineKeyStr,
|
||||
}); err != nil {
|
||||
log.Error().
|
||||
Str("func", "RegisterWebAPI").
|
||||
Err(err).
|
||||
Msg("Could not render register web API template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render register web API template"),
|
||||
)
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusOK, "text/html; charset=utf-8", content.Bytes())
|
||||
}
|
||||
|
||||
// RegistrationHandler handles the actual registration process of a machine
|
||||
// Endpoint /machine/:id
|
||||
func (h *Headscale) RegistrationHandler(c *gin.Context) {
|
||||
body, _ := io.ReadAll(c.Request.Body)
|
||||
mKeyStr := c.Param("id")
|
||||
mKey, err := wgkey.ParseHex(mKeyStr)
|
||||
// Endpoint /machine/:id.
|
||||
func (h *Headscale) RegistrationHandler(ctx *gin.Context) {
|
||||
body, _ := io.ReadAll(ctx.Request.Body)
|
||||
machineKeyStr := ctx.Param("id")
|
||||
|
||||
var machineKey key.MachinePublic
|
||||
err := machineKey.UnmarshalText([]byte(MachinePublicKeyEnsurePrefix(machineKeyStr)))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot parse machine key")
|
||||
c.String(http.StatusInternalServerError, "Sad!")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Sad!")
|
||||
|
||||
return
|
||||
}
|
||||
req := tailcfg.RegisterRequest{}
|
||||
err = decode(body, &req, &mKey, h.privateKey)
|
||||
err = decode(body, &req, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot decode message")
|
||||
c.String(http.StatusInternalServerError, "Very sad!")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", "unknown").Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Very sad!")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
var m Machine
|
||||
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
machine, err := h.GetMachineByMachineKey(machineKey)
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Info().Str("machine", req.Hostinfo.Hostname).Msg("New machine")
|
||||
m = Machine{
|
||||
Expiry: &req.Expiry,
|
||||
MachineKey: mKey.HexString(),
|
||||
Name: req.Hostinfo.Hostname,
|
||||
NodeKey: wgkey.Key(req.NodeKey).HexString(),
|
||||
LastSuccessfulUpdate: &now,
|
||||
newMachine := Machine{
|
||||
Expiry: &time.Time{},
|
||||
MachineKey: MachinePublicKeyStripPrefix(machineKey),
|
||||
Name: req.Hostinfo.Hostname,
|
||||
}
|
||||
if err := h.db.Create(&m).Error; err != nil {
|
||||
if err := h.db.Create(&newMachine).Error; err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Could not create row")
|
||||
machineRegistrations.WithLabelValues("unknown", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
machine = &newMachine
|
||||
}
|
||||
|
||||
if !m.Registered && req.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(c, h.db, mKey, req, m)
|
||||
return
|
||||
}
|
||||
if machine.Registered {
|
||||
// If the NodeKey stored in headscale is the same as the key presented in a registration
|
||||
// request, then we have a node that is either:
|
||||
// - Trying to log out (sending a expiry in the past)
|
||||
// - A valid, registered machine, looking for the node map
|
||||
// - Expired machine wanting to reauthenticate
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(req.NodeKey) {
|
||||
// The client sends an Expiry in the past if the client is requesting to expire the key (aka logout)
|
||||
// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L648
|
||||
if !req.Expiry.IsZero() && req.Expiry.UTC().Before(now) {
|
||||
h.handleMachineLogOut(ctx, machineKey, *machine)
|
||||
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// We have the updated key!
|
||||
if m.NodeKey == wgkey.Key(req.NodeKey).HexString() {
|
||||
if m.Registered {
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *m.Namespace.toUser()
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
|
||||
// If machine is not expired, and is register, we have a already accepted this machine,
|
||||
// let it proceed with a valid registration
|
||||
if !machine.isExpired() {
|
||||
h.handleMachineValidRegistration(ctx, machineKey, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after a key expiration
|
||||
if machine.NodeKey == NodePublicKeyStripPrefix(req.OldNodeKey) &&
|
||||
!machine.isExpired() {
|
||||
h.handleMachineRefreshKey(ctx, machineKey, req, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("Not registered and not NodeKey rotation. Sending a authurl to register")
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
h.cfg.ServerURL, mKey.HexString())
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
// The machine has expired
|
||||
h.handleMachineExpired(ctx, machineKey, req, *machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// The NodeKey we have matches OldNodeKey, which means this is a refresh after an key expiration
|
||||
if m.NodeKey == wgkey.Key(req.OldNodeKey).HexString() {
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
m.NodeKey = wgkey.Key(req.NodeKey).HexString()
|
||||
h.db.Save(&m)
|
||||
// If the machine has AuthKey set, handle registration via PreAuthKeys
|
||||
if req.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(ctx, machineKey, req, *machine)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *m.Namespace.toUser()
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
return
|
||||
}
|
||||
|
||||
// We arrive here after a client is restarted without finalizing the authentication flow or
|
||||
// when headscale is stopped in the middle of the auth process.
|
||||
if m.Registered {
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("The node is sending us a new NodeKey, but machine is registered. All clear for /map")
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *m.Namespace.toUser()
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("handler", "Registration").
|
||||
Str("machine", m.Name).
|
||||
Msg("The node is sending us a new NodeKey, sending auth url")
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
h.cfg.ServerURL, mKey.HexString())
|
||||
respBody, err := encode(resp, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "Registration").
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
h.handleMachineRegistrationNew(ctx, machineKey, req, *machine)
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
|
||||
func (h *Headscale) getMapResponse(
|
||||
machineKey key.MachinePublic,
|
||||
req tailcfg.MapRequest,
|
||||
machine *Machine,
|
||||
) ([]byte, error) {
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Msg("Creating Map response")
|
||||
node, err := m.toNode(true)
|
||||
node, err := machine.toNode(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot convert to node")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
peers, err := h.getPeers(m)
|
||||
|
||||
peers, err := h.getValidPeers(machine)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Cannot fetch peers")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
profile := tailcfg.UserProfile{
|
||||
ID: tailcfg.UserID(m.NamespaceID),
|
||||
LoginName: m.Namespace.Name,
|
||||
DisplayName: m.Namespace.Name,
|
||||
profiles := getMapResponseUserProfiles(*machine, peers)
|
||||
|
||||
nodePeers, err := peers.toNodes(h.cfg.BaseDomain, h.cfg.DNSConfig, true)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "getMapResponse").
|
||||
Err(err).
|
||||
Msg("Failed to convert peers to Tailscale nodes")
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
h.cfg.DNSConfig,
|
||||
h.cfg.BaseDomain,
|
||||
*machine,
|
||||
peers,
|
||||
)
|
||||
|
||||
resp := tailcfg.MapResponse{
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: *peers,
|
||||
//TODO(kradalby): As per tailscale docs, if DNSConfig is nil,
|
||||
// it means its not updated, maybe we can have some logic
|
||||
// to check and only pass updates when its updates.
|
||||
// This is probably more relevant if we try to implement
|
||||
// "MagicDNS"
|
||||
DNSConfig: h.cfg.DNSConfig,
|
||||
SearchPaths: []string{},
|
||||
Domain: "headscale.net",
|
||||
PacketFilter: *h.aclRules,
|
||||
DERPMap: h.cfg.DerpMap,
|
||||
UserProfiles: []tailcfg.UserProfile{profile},
|
||||
KeepAlive: false,
|
||||
Node: node,
|
||||
Peers: nodePeers,
|
||||
DNSConfig: dnsConfig,
|
||||
Domain: h.cfg.BaseDomain,
|
||||
PacketFilter: h.aclRules,
|
||||
DERPMap: h.DERPMap,
|
||||
UserProfiles: profiles,
|
||||
}
|
||||
|
||||
log.Trace().
|
||||
Str("func", "getMapResponse").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
// Interface("payload", resp).
|
||||
Msgf("Generated map response: %s", tailMapResponseToString(resp))
|
||||
|
||||
var respBody []byte
|
||||
@@ -268,118 +265,352 @@ func (h *Headscale) getMapResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Mac
|
||||
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody, err = encodeMsg(srcCompressed, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
} else {
|
||||
respBody, err = encode(resp, &mKey, h.privateKey)
|
||||
respBody, err = encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// declare the incoming size on the first 4 bytes
|
||||
data := make([]byte, 4)
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
data = append(data, respBody...)
|
||||
return &data, nil
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) getMapKeepAliveResponse(mKey wgkey.Key, req tailcfg.MapRequest, m Machine) (*[]byte, error) {
|
||||
resp := tailcfg.MapResponse{
|
||||
func (h *Headscale) getMapKeepAliveResponse(
|
||||
machineKey key.MachinePublic,
|
||||
mapRequest tailcfg.MapRequest,
|
||||
) ([]byte, error) {
|
||||
mapResponse := tailcfg.MapResponse{
|
||||
KeepAlive: true,
|
||||
}
|
||||
var respBody []byte
|
||||
var err error
|
||||
if req.Compress == "zstd" {
|
||||
src, _ := json.Marshal(resp)
|
||||
if mapRequest.Compress == "zstd" {
|
||||
src, _ := json.Marshal(mapResponse)
|
||||
encoder, _ := zstd.NewWriter(nil)
|
||||
srcCompressed := encoder.EncodeAll(src, nil)
|
||||
respBody, err = encodeMsg(srcCompressed, &mKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
respBody = h.privateKey.SealTo(machineKey, srcCompressed)
|
||||
} else {
|
||||
respBody, err = encode(resp, &mKey, h.privateKey)
|
||||
respBody, err = encode(mapResponse, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
data := make([]byte, 4)
|
||||
data := make([]byte, reservedResponseHeaderSize)
|
||||
binary.LittleEndian.PutUint32(data, uint32(len(respBody)))
|
||||
data = append(data, respBody...)
|
||||
return &data, nil
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) handleAuthKey(c *gin.Context, db *gorm.DB, idKey wgkey.Key, req tailcfg.RegisterRequest, m Machine) {
|
||||
func (h *Headscale) handleMachineLogOut(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Client requested logout")
|
||||
|
||||
h.ExpireMachine(&machine)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = false
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineValidRegistration(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is valid, respond with redirect to /map
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Client is registered and we have the current NodeKey. All clear to /map")
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
resp.Login = *machine.Namespace.toLogin()
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("update", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("update", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineExpired(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The client has registered before, but has expired
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Machine registration has expired. Sending a authurl to register")
|
||||
|
||||
if registerRequest.Auth.AuthKey != "" {
|
||||
h.handleAuthKey(ctx, machineKey, registerRequest, machine)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf("%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), machineKey.String())
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), machineKey.String())
|
||||
}
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
machineRegistrations.WithLabelValues("reauth", "web", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRefreshKey(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("We have the OldNodeKey in the database. This is a key refresh")
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
h.db.Save(&machine)
|
||||
|
||||
resp.AuthURL = ""
|
||||
resp.User = *machine.Namespace.toUser()
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
func (h *Headscale) handleMachineRegistrationNew(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
|
||||
// The machine registration is new, redirect the client to the registration URL
|
||||
log.Debug().
|
||||
Str("machine", machine.Name).
|
||||
Msg("The node is sending us a new NodeKey, sending auth url")
|
||||
if h.cfg.OIDC.Issuer != "" {
|
||||
resp.AuthURL = fmt.Sprintf(
|
||||
"%s/oidc/register/%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"),
|
||||
machineKey.String(),
|
||||
)
|
||||
} else {
|
||||
resp.AuthURL = fmt.Sprintf("%s/register?key=%s",
|
||||
strings.TrimSuffix(h.cfg.ServerURL, "/"), MachinePublicKeyStripPrefix(machineKey))
|
||||
}
|
||||
|
||||
if !registerRequest.Expiry.IsZero() {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", machine.Name).
|
||||
Time("expiry", registerRequest.Expiry).
|
||||
Msg("Non-zero expiry time requested, adding to cache")
|
||||
h.requestedExpiryCache.Set(
|
||||
machineKey.String(),
|
||||
registerRequest.Expiry,
|
||||
requestedExpiryCacheExpiration,
|
||||
)
|
||||
}
|
||||
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
|
||||
// save the NodeKey
|
||||
h.db.Save(&machine)
|
||||
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
|
||||
return
|
||||
}
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
}
|
||||
|
||||
// TODO: check if any locks are needed around IP allocation.
|
||||
func (h *Headscale) handleAuthKey(
|
||||
ctx *gin.Context,
|
||||
machineKey key.MachinePublic,
|
||||
registerRequest tailcfg.RegisterRequest,
|
||||
machine Machine,
|
||||
) {
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", req.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", req.Hostinfo.Hostname)
|
||||
Str("machine", registerRequest.Hostinfo.Hostname).
|
||||
Msgf("Processing auth key for %s", registerRequest.Hostinfo.Hostname)
|
||||
resp := tailcfg.RegisterResponse{}
|
||||
pak, err := h.checkKeyValidity(req.Auth.AuthKey)
|
||||
pak, err := h.checkKeyValidity(registerRequest.Auth.AuthKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Err(err).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
resp.MachineAuthorized = false
|
||||
respBody, err := encode(resp, &idKey, h.privateKey)
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("machine", machine.Name).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "")
|
||||
ctx.String(http.StatusInternalServerError, "")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
ctx.Data(http.StatusUnauthorized, "application/json; charset=utf-8", respBody)
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("machine", machine.Name).
|
||||
Msg("Failed authentication via AuthKey")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Msg("Authentication key was valid, proceeding to acquire an IP address")
|
||||
ip, err := h.getAvailableIP()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
if machine.isRegistered() {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("machine", machine.Name).
|
||||
Msg("machine already registered, reauthenticating")
|
||||
|
||||
h.RefreshMachine(&machine, registerRequest.Expiry)
|
||||
} else {
|
||||
log.Debug().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Msg("Failed to find an available IP")
|
||||
return
|
||||
}
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("ip", ip.String()).
|
||||
Msgf("Assigning %s to %s", ip, m.Name)
|
||||
Str("machine", machine.Name).
|
||||
Msg("Authentication key was valid, proceeding to acquire IP addresses")
|
||||
ips, err := h.getAvailableIPs()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Msg("Failed to find an available IP address")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
|
||||
m.AuthKeyID = uint(pak.ID)
|
||||
m.IPAddress = ip.String()
|
||||
m.NamespaceID = pak.NamespaceID
|
||||
m.NodeKey = wgkey.Key(req.NodeKey).HexString() // we update it just in case
|
||||
m.Registered = true
|
||||
m.RegisterMethod = "authKey"
|
||||
db.Save(&m)
|
||||
return
|
||||
}
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", machine.Name).
|
||||
Str("ips", strings.Join(ips.ToStringSlice(), ",")).
|
||||
Msgf("Assigning %s to %s", strings.Join(ips.ToStringSlice(), ","), machine.Name)
|
||||
|
||||
machine.Expiry = ®isterRequest.Expiry
|
||||
machine.AuthKeyID = uint(pak.ID)
|
||||
machine.IPAddresses = ips
|
||||
machine.NamespaceID = pak.NamespaceID
|
||||
|
||||
machine.NodeKey = NodePublicKeyStripPrefix(registerRequest.NodeKey)
|
||||
// we update it just in case
|
||||
machine.Registered = true
|
||||
machine.RegisterMethod = RegisterMethodAuthKey
|
||||
h.db.Save(&machine)
|
||||
}
|
||||
|
||||
pak.Used = true
|
||||
h.db.Save(&pak)
|
||||
|
||||
resp.MachineAuthorized = true
|
||||
resp.User = *pak.Namespace.toUser()
|
||||
respBody, err := encode(resp, &idKey, h.privateKey)
|
||||
respBody, err := encode(resp, &machineKey, h.privateKey)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("machine", machine.Name).
|
||||
Err(err).
|
||||
Msg("Cannot encode message")
|
||||
c.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "error", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.String(http.StatusInternalServerError, "Extremely sad!")
|
||||
|
||||
return
|
||||
}
|
||||
c.Data(200, "application/json; charset=utf-8", respBody)
|
||||
machineRegistrations.WithLabelValues("new", "authkey", "success", machine.Namespace.Name).
|
||||
Inc()
|
||||
ctx.Data(http.StatusOK, "application/json; charset=utf-8", respBody)
|
||||
log.Info().
|
||||
Str("func", "handleAuthKey").
|
||||
Str("machine", m.Name).
|
||||
Str("ip", ip.String()).
|
||||
Str("machine", machine.Name).
|
||||
Str("ips", strings.Join(machine.IPAddresses.ToStringSlice(), ", ")).
|
||||
Msg("Successfully authenticated via AuthKey")
|
||||
}
|
||||
|
||||
api_key.go (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
package headscale

import (
	"fmt"
	"strings"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"golang.org/x/crypto/bcrypt"
	"google.golang.org/protobuf/types/known/timestamppb"
)

const (
	apiPrefixLength = 7
	apiKeyLength    = 32
	apiKeyParts     = 2

	errAPIKeyFailedToParse = Error("Failed to parse ApiKey")
)

// APIKey describes the datamodel for API keys used to remotely authenticate with
// headscale.
type APIKey struct {
	ID     uint64 `gorm:"primary_key"`
	Prefix string `gorm:"uniqueIndex"`
	Hash   []byte

	CreatedAt  *time.Time
	Expiration *time.Time
	LastSeen   *time.Time
}

// CreateAPIKey creates a new ApiKey in a namespace, and returns it.
func (h *Headscale) CreateAPIKey(
	expiration *time.Time,
) (string, *APIKey, error) {
	prefix, err := GenerateRandomStringURLSafe(apiPrefixLength)
	if err != nil {
		return "", nil, err
	}

	toBeHashed, err := GenerateRandomStringURLSafe(apiKeyLength)
	if err != nil {
		return "", nil, err
	}

	// Key to return to user, this will only be visible _once_
	keyStr := prefix + "." + toBeHashed

	hash, err := bcrypt.GenerateFromPassword([]byte(toBeHashed), bcrypt.DefaultCost)
	if err != nil {
		return "", nil, err
	}

	key := APIKey{
		Prefix:     prefix,
		Hash:       hash,
		Expiration: expiration,
	}
	h.db.Save(&key)

	return keyStr, &key, nil
}

// ListAPIKeys returns the list of ApiKeys for a namespace.
func (h *Headscale) ListAPIKeys() ([]APIKey, error) {
	keys := []APIKey{}
	if err := h.db.Find(&keys).Error; err != nil {
		return nil, err
	}

	return keys, nil
}

// GetAPIKey returns a ApiKey for a given key.
func (h *Headscale) GetAPIKey(prefix string) (*APIKey, error) {
	key := APIKey{}
	if result := h.db.First(&key, "prefix = ?", prefix); result.Error != nil {
		return nil, result.Error
	}

	return &key, nil
}

// GetAPIKeyByID returns a ApiKey for a given id.
func (h *Headscale) GetAPIKeyByID(id uint64) (*APIKey, error) {
	key := APIKey{}
	if result := h.db.Find(&APIKey{ID: id}).First(&key); result.Error != nil {
		return nil, result.Error
	}

	return &key, nil
}

// DestroyAPIKey destroys a ApiKey. Returns error if the ApiKey
// does not exist.
func (h *Headscale) DestroyAPIKey(key APIKey) error {
	if result := h.db.Unscoped().Delete(key); result.Error != nil {
		return result.Error
	}

	return nil
}

// ExpireAPIKey marks a ApiKey as expired.
func (h *Headscale) ExpireAPIKey(key *APIKey) error {
	if err := h.db.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
		return err
	}

	return nil
}

func (h *Headscale) ValidateAPIKey(keyStr string) (bool, error) {
	prefix, hash, err := splitAPIKey(keyStr)
	if err != nil {
		return false, fmt.Errorf("failed to validate api key: %w", err)
	}

	key, err := h.GetAPIKey(prefix)
	if err != nil {
		return false, fmt.Errorf("failed to validate api key: %w", err)
	}

	if key.Expiration.Before(time.Now()) {
		return false, nil
	}

	if err := bcrypt.CompareHashAndPassword(key.Hash, []byte(hash)); err != nil {
		return false, err
	}

	return true, nil
}

func splitAPIKey(key string) (string, string, error) {
	parts := strings.Split(key, ".")
	if len(parts) != apiKeyParts {
		return "", "", errAPIKeyFailedToParse
	}

	return parts[0], parts[1], nil
}

func (key *APIKey) toProto() *v1.ApiKey {
	protoKey := v1.ApiKey{
		Id:     key.ID,
		Prefix: key.Prefix,
	}

	if key.Expiration != nil {
		protoKey.Expiration = timestamppb.New(*key.Expiration)
	}

	if key.CreatedAt != nil {
		protoKey.CreatedAt = timestamppb.New(*key.CreatedAt)
	}

	if key.LastSeen != nil {
		protoKey.LastSeen = timestamppb.New(*key.LastSeen)
	}

	return &protoKey
}
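The full key handed back by CreateAPIKey has the form <prefix>.<secret>; only the bcrypt hash of the secret half is stored, so the plaintext is visible exactly once. A minimal usage sketch, written as if it lived next to the functions above; the helper name and the 90-day expiry are illustrative only.

// apiKeyLifecycle is a hypothetical illustration of the API key flow; it is
// not part of the diff.
func apiKeyLifecycle(h *Headscale) error {
	expiration := time.Now().Add(90 * 24 * time.Hour)

	// keyStr ("<prefix>.<secret>") is the only copy of the plaintext key.
	keyStr, apiKey, err := h.CreateAPIKey(&expiration)
	if err != nil {
		return err
	}

	// A later request presents keyStr: the key is looked up by prefix and the
	// secret half is checked against the stored bcrypt hash.
	valid, err := h.ValidateAPIKey(keyStr)
	if err != nil {
		return err
	}
	if !valid {
		return fmt.Errorf("api key with prefix %s is expired or invalid", apiKey.Prefix)
	}

	// Revocation simply moves the expiration into the past.
	return h.ExpireAPIKey(apiKey)
}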
api_key_test.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package headscale

import (
	"time"

	"gopkg.in/check.v1"
)

func (*Suite) TestCreateAPIKey(c *check.C) {
	apiKeyStr, apiKey, err := app.CreateAPIKey(nil)
	c.Assert(err, check.IsNil)
	c.Assert(apiKey, check.NotNil)

	// Did we get a valid key?
	c.Assert(apiKey.Prefix, check.NotNil)
	c.Assert(apiKey.Hash, check.NotNil)
	c.Assert(apiKeyStr, check.Not(check.Equals), "")

	_, err = app.ListAPIKeys()
	c.Assert(err, check.IsNil)

	keys, err := app.ListAPIKeys()
	c.Assert(err, check.IsNil)
	c.Assert(len(keys), check.Equals, 1)
}

func (*Suite) TestAPIKeyDoesNotExist(c *check.C) {
	key, err := app.GetAPIKey("does-not-exist")
	c.Assert(err, check.NotNil)
	c.Assert(key, check.IsNil)
}

func (*Suite) TestValidateAPIKeyOk(c *check.C) {
	nowPlus2 := time.Now().Add(2 * time.Hour)
	apiKeyStr, apiKey, err := app.CreateAPIKey(&nowPlus2)
	c.Assert(err, check.IsNil)
	c.Assert(apiKey, check.NotNil)

	valid, err := app.ValidateAPIKey(apiKeyStr)
	c.Assert(err, check.IsNil)
	c.Assert(valid, check.Equals, true)
}

func (*Suite) TestValidateAPIKeyNotOk(c *check.C) {
	nowMinus2 := time.Now().Add(time.Duration(-2) * time.Hour)
	apiKeyStr, apiKey, err := app.CreateAPIKey(&nowMinus2)
	c.Assert(err, check.IsNil)
	c.Assert(apiKey, check.NotNil)

	valid, err := app.ValidateAPIKey(apiKeyStr)
	c.Assert(err, check.IsNil)
	c.Assert(valid, check.Equals, false)

	now := time.Now()
	apiKeyStrNow, apiKey, err := app.CreateAPIKey(&now)
	c.Assert(err, check.IsNil)
	c.Assert(apiKey, check.NotNil)

	validNow, err := app.ValidateAPIKey(apiKeyStrNow)
	c.Assert(err, check.IsNil)
	c.Assert(validNow, check.Equals, false)

	validSilly, err := app.ValidateAPIKey("nota.validkey")
	c.Assert(err, check.NotNil)
	c.Assert(validSilly, check.Equals, false)

	validWithErr, err := app.ValidateAPIKey("produceerrorkey")
	c.Assert(err, check.NotNil)
	c.Assert(validWithErr, check.Equals, false)
}

func (*Suite) TestExpireAPIKey(c *check.C) {
	nowPlus2 := time.Now().Add(2 * time.Hour)
	apiKeyStr, apiKey, err := app.CreateAPIKey(&nowPlus2)
	c.Assert(err, check.IsNil)
	c.Assert(apiKey, check.NotNil)

	valid, err := app.ValidateAPIKey(apiKeyStr)
	c.Assert(err, check.IsNil)
	c.Assert(valid, check.Equals, true)

	err = app.ExpireAPIKey(apiKey)
	c.Assert(err, check.IsNil)
	c.Assert(apiKey.Expiration, check.NotNil)

	notValid, err := app.ValidateAPIKey(apiKeyStr)
	c.Assert(err, check.IsNil)
	c.Assert(notValid, check.Equals, false)
}
app.go (726 lines)
@@ -1,32 +1,81 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/gin-gonic/gin"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/patrickmn/go-cache"
|
||||
zerolog "github.com/philip-bui/grpc-zerolog"
|
||||
zl "github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
ginprometheus "github.com/zsais/go-gin-prometheus"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/reflection"
|
||||
"google.golang.org/grpc/status"
|
||||
"gorm.io/gorm"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration
|
||||
const (
|
||||
AuthPrefix = "Bearer "
|
||||
Postgres = "postgres"
|
||||
Sqlite = "sqlite3"
|
||||
updateInterval = 5000
|
||||
HTTPReadTimeout = 30 * time.Second
|
||||
privateKeyFileMode = 0o600
|
||||
|
||||
requestedExpiryCacheExpiration = time.Minute * 5
|
||||
requestedExpiryCacheCleanupInterval = time.Minute * 10
|
||||
|
||||
errUnsupportedDatabase = Error("unsupported DB")
|
||||
errUnsupportedLetsEncryptChallengeType = Error(
|
||||
"unknown value for Lets Encrypt challenge type",
|
||||
)
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration.
|
||||
type Config struct {
|
||||
ServerURL string
|
||||
Addr string
|
||||
PrivateKeyPath string
|
||||
DerpMap *tailcfg.DERPMap
|
||||
GRPCAddr string
|
||||
GRPCAllowInsecure bool
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
IPPrefix netaddr.IPPrefix
|
||||
IPPrefixes []netaddr.IPPrefix
|
||||
PrivateKeyPath string
|
||||
BaseDomain string
|
||||
|
||||
DERP DERPConfig
|
||||
|
||||
DBtype string
|
||||
DBpath string
|
||||
@@ -44,76 +93,135 @@ type Config struct {
|
||||
TLSCertPath string
|
||||
TLSKeyPath string
|
||||
|
||||
ACMEURL string
|
||||
ACMEEmail string
|
||||
|
||||
DNSConfig *tailcfg.DNSConfig
|
||||
|
||||
UnixSocket string
|
||||
UnixSocketPermission fs.FileMode
|
||||
|
||||
OIDC OIDCConfig
|
||||
|
||||
CLI CLIConfig
|
||||
}
|
||||
|
||||
// Headscale represents the base app of the service
|
||||
type OIDCConfig struct {
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
MatchMap map[string]string
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
Address string
|
||||
APIKey string
|
||||
Timeout time.Duration
|
||||
Insecure bool
|
||||
}
|
||||
|
||||
// Headscale represents the base app of the service.
|
||||
type Headscale struct {
|
||||
cfg Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
publicKey *wgkey.Key
|
||||
privateKey *wgkey.Private
|
||||
privateKey *key.MachinePrivate
|
||||
|
||||
DERPMap *tailcfg.DERPMap
|
||||
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules *[]tailcfg.FilterRule
|
||||
|
||||
clientsUpdateChannels sync.Map
|
||||
clientsUpdateChannelMutex sync.Mutex
|
||||
aclRules []tailcfg.FilterRule
|
||||
|
||||
lastStateChange sync.Map
|
||||
|
||||
oidcProvider *oidc.Provider
|
||||
oauth2Config *oauth2.Config
|
||||
oidcStateCache *cache.Cache
|
||||
|
||||
requestedExpiryCache *cache.Cache
|
||||
}
|
||||
|
||||
// NewHeadscale returns the Headscale app
|
||||
// NewHeadscale returns the Headscale app.
|
||||
func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
content, err := os.ReadFile(cfg.PrivateKeyPath)
|
||||
privKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to read or create private key: %w", err)
|
||||
}
|
||||
privKey, err := wgkey.ParsePrivate(string(content))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubKey := privKey.Public()
|
||||
|
||||
var dbString string
|
||||
switch cfg.DBtype {
|
||||
case "postgres":
|
||||
dbString = fmt.Sprintf("host=%s port=%d dbname=%s user=%s password=%s sslmode=disable", cfg.DBhost,
|
||||
cfg.DBport, cfg.DBname, cfg.DBuser, cfg.DBpass)
|
||||
case "sqlite3":
|
||||
case Postgres:
|
||||
dbString = fmt.Sprintf(
|
||||
"host=%s port=%d dbname=%s user=%s password=%s sslmode=disable",
|
||||
cfg.DBhost,
|
||||
cfg.DBport,
|
||||
cfg.DBname,
|
||||
cfg.DBuser,
|
||||
cfg.DBpass,
|
||||
)
|
||||
case Sqlite:
|
||||
dbString = cfg.DBpath
|
||||
default:
|
||||
return nil, errors.New("unsupported DB")
|
||||
return nil, errUnsupportedDatabase
|
||||
}
|
||||
|
||||
h := Headscale{
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
publicKey: &pubKey,
|
||||
aclRules: &tailcfg.FilterAllowAll, // default allowall
|
||||
requestedExpiryCache := cache.New(
|
||||
requestedExpiryCacheExpiration,
|
||||
requestedExpiryCacheCleanupInterval,
|
||||
)
|
||||
|
||||
app := Headscale{
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
aclRules: tailcfg.FilterAllowAll, // default allowall
|
||||
requestedExpiryCache: requestedExpiryCache,
|
||||
}
|
||||
|
||||
err = h.initDB()
|
||||
err = app.initDB()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &h, nil
|
||||
if cfg.OIDC.Issuer != "" {
|
||||
err = app.initOIDC()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if app.cfg.DNSConfig != nil && app.cfg.DNSConfig.Proxied { // if MagicDNS
|
||||
magicDNSDomains := generateMagicDNSRootDomains(app.cfg.IPPrefixes)
|
||||
// we might have routes already from Split DNS
|
||||
if app.cfg.DNSConfig.Routes == nil {
|
||||
app.cfg.DNSConfig.Routes = make(map[string][]dnstype.Resolver)
|
||||
}
|
||||
for _, d := range magicDNSDomains {
|
||||
app.cfg.DNSConfig.Routes[d.WithoutTrailingDot()] = nil
|
||||
}
|
||||
}
|
||||
|
||||
return &app, nil
|
||||
}
|
||||
|
||||
// Redirect to our TLS url
|
||||
// Redirect to our TLS url.
|
||||
func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
|
||||
target := h.cfg.ServerURL + req.URL.RequestURI()
|
||||
http.Redirect(w, req, target, http.StatusFound)
|
||||
}
|
||||
|
||||
// expireEphemeralNodes deletes ephemeral machine records that have not been
|
||||
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout
|
||||
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout.
|
||||
func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
@@ -125,126 +233,542 @@ func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error listing namespaces")
|
||||
|
||||
return
|
||||
}
|
||||
for _, ns := range *namespaces {
|
||||
machines, err := h.ListMachinesInNamespace(ns.Name)
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
machines, err := h.ListMachinesInNamespace(namespace.Name)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("namespace", ns.Name).Msg("Error listing machines in namespace")
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("namespace", namespace.Name).
|
||||
Msg("Error listing machines in namespace")
|
||||
|
||||
return
|
||||
}
|
||||
for _, m := range *machines {
|
||||
if m.AuthKey != nil && m.LastSeen != nil && m.AuthKey.Ephemeral && time.Now().After(m.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
log.Info().Str("machine", m.Name).Msg("Ephemeral client removed from database")
|
||||
err = h.db.Unscoped().Delete(m).Error
|
||||
|
||||
for _, machine := range machines {
|
||||
if machine.AuthKey != nil && machine.LastSeen != nil &&
|
||||
machine.AuthKey.Ephemeral &&
|
||||
time.Now().
|
||||
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Ephemeral client removed from database")
|
||||
|
||||
err = h.db.Unscoped().Delete(machine).Error
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("machine", m.Name).Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Name).
|
||||
Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
}
|
||||
h.notifyChangesToPeers(&m)
|
||||
}
|
||||
}
|
||||
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
}
|
||||
}
|
||||
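expireEphemeralNodesWorker above deletes a machine once it was registered with an ephemeral pre-auth key and has been silent for longer than EphemeralNodeInactivityTimeout. A standalone sketch of just that predicate, with stand-in types and an illustrative function name:

package main

import (
	"fmt"
	"time"
)

// Minimal stand-ins for the fields the worker actually inspects.
type preAuthKey struct{ Ephemeral bool }

type machine struct {
	AuthKey  *preAuthKey
	LastSeen *time.Time
}

// isEphemeralAndStale mirrors the condition in expireEphemeralNodesWorker:
// the node was registered with an ephemeral pre-auth key and has not been
// seen within the configured inactivity timeout.
func isEphemeralAndStale(m machine, inactivityTimeout time.Duration, now time.Time) bool {
	return m.AuthKey != nil && m.LastSeen != nil &&
		m.AuthKey.Ephemeral &&
		now.After(m.LastSeen.Add(inactivityTimeout))
}

func main() {
	lastSeen := time.Now().Add(-30 * time.Minute)
	node := machine{AuthKey: &preAuthKey{Ephemeral: true}, LastSeen: &lastSeen}

	fmt.Println(isEphemeralAndStale(node, 5*time.Minute, time.Now())) // true
	fmt.Println(isEphemeralAndStale(node, 24*time.Hour, time.Now()))  // false
}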
|
||||
// WatchForKVUpdates checks the KV DB table for requests to perform tailnet upgrades
|
||||
// This is a way for the CLI to communicate with the headscale server
|
||||
func (h *Headscale) watchForKVUpdates(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
h.watchForKVUpdatesWorker()
|
||||
func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
req interface{},
|
||||
info *grpc.UnaryServerInfo,
|
||||
handler grpc.UnaryHandler) (interface{}, error) {
|
||||
// Check if the request is coming from the on-server client.
|
||||
// This is not secure, but it is to maintain maintainability
|
||||
// with the "legacy" database-based client
|
||||
// It is also needed for grpc-gateway to be able to connect to
|
||||
// the server
|
||||
client, _ := peer.FromContext(ctx)
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Client is trying to authenticate")
|
||||
|
||||
meta, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Retrieving metadata is failed")
|
||||
|
||||
return ctx, status.Errorf(
|
||||
codes.InvalidArgument,
|
||||
"Retrieving metadata is failed",
|
||||
)
|
||||
}
|
||||
|
||||
authHeader, ok := meta["authorization"]
|
||||
if !ok {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Authorization token is not supplied")
|
||||
|
||||
return ctx, status.Errorf(
|
||||
codes.Unauthenticated,
|
||||
"Authorization token is not supplied",
|
||||
)
|
||||
}
|
||||
|
||||
token := authHeader[0]
|
||||
|
||||
if !strings.HasPrefix(token, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
|
||||
return ctx, status.Error(
|
||||
codes.Unauthenticated,
|
||||
`missing "Bearer " prefix in "Authorization" header`,
|
||||
)
|
||||
}
|
||||
|
||||
valid, err := h.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("failed to validate token")
|
||||
|
||||
return ctx, status.Error(codes.Internal, "failed to validate token")
|
||||
}
|
||||
|
||||
if !valid {
|
||||
log.Info().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("invalid token")
|
||||
|
||||
return ctx, status.Error(codes.Unauthenticated, "invalid token")
|
||||
}
|
||||
|
||||
return handler(ctx, req)
|
||||
}
|
||||
|
||||
func (h *Headscale) watchForKVUpdatesWorker() {
|
||||
h.checkForNamespacesPendingUpdates()
|
||||
// more functions will come here in the future
|
||||
func (h *Headscale) httpAuthenticationMiddleware(ctx *gin.Context) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("HTTP authentication invoked")
|
||||
|
||||
authHeader := ctx.GetHeader("authorization")
|
||||
|
||||
if !strings.HasPrefix(authHeader, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
|
||||
valid, err := h.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Err(err).
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("failed to validate token")
|
||||
|
||||
ctx.AbortWithStatus(http.StatusInternalServerError)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if !valid {
|
||||
log.Info().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("invalid token")
|
||||
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Next()
|
||||
}
|
||||
|
||||
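On the HTTP side, the middleware above guards the /api/v1 routes served through the grpc-gateway mux and expects the same "Bearer <api key>" Authorization header as the gRPC interceptor. A minimal client sketch follows; the server URL, the key value and the /api/v1/apikey path are placeholders for illustration, not confirmed endpoints.

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumptions: a reachable headscale server and a previously created API
	// key; both values are placeholders.
	serverURL := "https://headscale.example.com"
	apiKey := "prefix.secret"

	req, err := http.NewRequest(http.MethodGet, serverURL+"/api/v1/apikey", nil)
	if err != nil {
		log.Fatal(err)
	}
	// AuthPrefix in app.go is "Bearer "; anything else is rejected with 401.
	req.Header.Set("Authorization", "Bearer "+apiKey)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}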
// Serve launches a GIN server with the Headscale API
|
||||
// ensureUnixSocketIsAbsent will check if the given path for headscales unix socket is clear
|
||||
// and will remove it if it is not.
|
||||
func (h *Headscale) ensureUnixSocketIsAbsent() error {
|
||||
// File does not exist, all fine
|
||||
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return os.Remove(h.cfg.UnixSocket)
|
||||
}
|
||||
|
||||
func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *gin.Engine {
|
||||
router := gin.Default()
|
||||
|
||||
prometheus := ginprometheus.NewPrometheus("gin")
|
||||
prometheus.Use(router)
|
||||
|
||||
router.GET(
|
||||
"/health",
|
||||
func(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"healthy": "ok"}) },
|
||||
)
|
||||
router.GET("/key", h.KeyHandler)
|
||||
router.GET("/register", h.RegisterWebAPI)
|
||||
router.POST("/machine/:id/map", h.PollNetMapHandler)
|
||||
router.POST("/machine/:id", h.RegistrationHandler)
|
||||
router.GET("/oidc/register/:mkey", h.RegisterOIDC)
|
||||
router.GET("/oidc/callback", h.OIDCCallback)
|
||||
router.GET("/apple", h.AppleMobileConfig)
|
||||
router.GET("/apple/:platform", h.ApplePlatformConfig)
|
||||
router.GET("/swagger", SwaggerUI)
|
||||
router.GET("/swagger/v1/openapiv2.json", SwaggerAPIv1)
|
||||
|
||||
api := router.Group("/api")
|
||||
api.Use(h.httpAuthenticationMiddleware)
|
||||
{
|
||||
api.Any("/v1/*any", gin.WrapF(grpcMux.ServeHTTP))
|
||||
}
|
||||
|
||||
router.NoRoute(stdoutHandler)
|
||||
|
||||
return router
|
||||
}
|
||||
|
||||
// Serve launches a GIN server with the Headscale API.
|
||||
func (h *Headscale) Serve() error {
|
||||
r := gin.Default()
|
||||
r.GET("/health", func(c *gin.Context) { c.JSON(200, gin.H{"healthy": "ok"}) })
|
||||
r.GET("/key", h.KeyHandler)
|
||||
r.GET("/register", h.RegisterWebAPI)
|
||||
r.POST("/machine/:id/map", h.PollNetMapHandler)
|
||||
r.POST("/machine/:id", h.RegistrationHandler)
|
||||
var err error
|
||||
|
||||
timeout := 30 * time.Second
|
||||
// Fetch an initial DERP Map before we start serving
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
|
||||
go h.watchForKVUpdates(5000)
|
||||
go h.expireEphemeralNodes(5000)
|
||||
|
||||
s := &http.Server{
|
||||
Addr: h.cfg.Addr,
|
||||
Handler: r,
|
||||
ReadTimeout: timeout,
|
||||
WriteTimeout: timeout,
|
||||
if h.cfg.DERP.AutoUpdate {
|
||||
derpMapCancelChannel := make(chan struct{})
|
||||
defer func() { derpMapCancelChannel <- struct{}{} }()
|
||||
go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel)
|
||||
}
|
||||
|
||||
if h.cfg.TLSLetsEncryptHostname != "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
go h.expireEphemeralNodes(updateInterval)
|
||||
|
||||
if zl.GlobalLevel() == zl.TraceLevel {
|
||||
zerolog.RespLog = true
|
||||
} else {
|
||||
zerolog.RespLog = false
|
||||
}
|
||||
|
||||
// Prepare group for running listeners
|
||||
errorGroup := new(errgroup.Group)
|
||||
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
//
|
||||
//
|
||||
// Set up LOCAL listeners
|
||||
//
|
||||
|
||||
err = h.ensureUnixSocketIsAbsent()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to remove old socket file: %w", err)
|
||||
}
|
||||
|
||||
socketListener, err := net.Listen("unix", h.cfg.UnixSocket)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set up gRPC socket: %w", err)
|
||||
}
|
||||
|
||||
// Change socket permissions
|
||||
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil {
|
||||
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
|
||||
}
|
||||
|
||||
// Handle common process-killing signals so we can gracefully shut down:
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, os.Interrupt, syscall.SIGTERM)
|
||||
go func(c chan os.Signal) {
|
||||
// Wait for a SIGINT or SIGKILL:
|
||||
sig := <-c
|
||||
log.Printf("Caught signal %s: shutting down.", sig)
|
||||
// Stop listening (and unlink the socket if unix type):
|
||||
socketListener.Close()
|
||||
// And we're done:
|
||||
os.Exit(0)
|
||||
}(sigc)
|
||||
|
||||
grpcGatewayMux := runtime.NewServeMux()
|
||||
|
||||
// Make the grpc-gateway connect to grpc over socket
|
||||
grpcGatewayConn, err := grpc.Dial(
|
||||
h.cfg.UnixSocket,
|
||||
[]grpc.DialOption{
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(GrpcSocketDialer),
|
||||
}...,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect to the gRPC server over localhost to skip
|
||||
// the authentication.
|
||||
err = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Start the local gRPC server without TLS and without authentication
|
||||
grpcSocket := grpc.NewServer(zerolog.UnaryInterceptor())
|
||||
|
||||
v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))
|
||||
reflection.Register(grpcSocket)
|
||||
|
||||
errorGroup.Go(func() error { return grpcSocket.Serve(socketListener) })
|
||||
|
||||
//
|
||||
//
|
||||
// Set up REMOTE listeners
|
||||
//
|
||||
|
||||
tlsConfig, err := h.getTLSSettings()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Failed to set up TLS configuration")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
// gRPC setup
|
||||
//
|
||||
|
||||
// We are sadly not able to run gRPC and HTTPS (2.0) on the same
|
||||
// port because the connection mux does not support matching them
|
||||
// since they are so similar. There are multiple issues open and we
|
||||
// can revisit this if that changes:
|
||||
// https://github.com/soheilhy/cmux/issues/68
|
||||
// https://github.com/soheilhy/cmux/issues/91
|
||||
|
||||
if tlsConfig != nil || h.cfg.GRPCAllowInsecure {
|
||||
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(
|
||||
grpc_middleware.ChainUnaryServer(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
zerolog.NewUnaryServerInterceptor(),
|
||||
),
|
||||
),
|
||||
}
|
||||
|
||||
m := autocert.Manager{
|
||||
if tlsConfig != nil {
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.Creds(credentials.NewTLS(tlsConfig)),
|
||||
)
|
||||
} else {
|
||||
log.Warn().Msg("gRPC is running without security")
|
||||
}
|
||||
|
||||
grpcServer := grpc.NewServer(grpcOptions...)
|
||||
|
||||
v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))
|
||||
reflection.Register(grpcServer)
|
||||
|
||||
grpcListener, err := net.Listen("tcp", h.cfg.GRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return grpcServer.Serve(grpcListener) })
|
||||
|
||||
log.Info().
|
||||
Msgf("listening and serving gRPC on: %s", h.cfg.GRPCAddr)
|
||||
}
|
||||
|
||||
//
|
||||
//
|
||||
// HTTP setup
|
||||
//
|
||||
|
||||
router := h.createRouter(grpcGatewayMux)
|
||||
|
||||
httpServer := &http.Server{
|
||||
Addr: h.cfg.Addr,
|
||||
Handler: router,
|
||||
ReadTimeout: HTTPReadTimeout,
|
||||
// Go does not handle timeouts in HTTP very well, and there is
|
||||
// no good way to handle streaming timeouts, therefore we need to
|
||||
// keep this at unlimited and be careful to clean up connections
|
||||
// https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/#aboutstreaming
|
||||
WriteTimeout: 0,
|
||||
}
|
||||
|
||||
var httpListener net.Listener
|
||||
if tlsConfig != nil {
|
||||
httpServer.TLSConfig = tlsConfig
|
||||
httpListener, err = tls.Listen("tcp", h.cfg.Addr, tlsConfig)
|
||||
} else {
|
||||
httpListener, err = net.Listen("tcp", h.cfg.Addr)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return httpServer.Serve(httpListener) })
|
||||
|
||||
log.Info().
|
||||
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)
|
||||
|
||||
return errorGroup.Wait()
|
||||
}
|
||||
|
||||
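Serve registers the Unix-socket gRPC server, the optional remote gRPC listener and the HTTP server on one errgroup and then blocks in errorGroup.Wait(), which returns the first error once a server stops. A condensed sketch of that pattern, with two placeholder HTTP listeners standing in for the real servers:

package main

import (
	"log"
	"net"
	"net/http"

	"golang.org/x/sync/errgroup"
)

func main() {
	errorGroup := new(errgroup.Group)

	httpListener, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}

	otherListener, err := net.Listen("tcp", "127.0.0.1:50443")
	if err != nil {
		log.Fatal(err)
	}

	// Each server runs in its own goroutine; Wait returns the first
	// non-nil error after the servers have stopped.
	errorGroup.Go(func() error { return http.Serve(httpListener, http.NotFoundHandler()) })
	errorGroup.Go(func() error { return http.Serve(otherListener, http.NotFoundHandler()) })

	log.Fatal(errorGroup.Wait())
}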
func (h *Headscale) getTLSSettings() (*tls.Config, error) {
|
||||
var err error
|
||||
if h.cfg.TLSLetsEncryptHostname != "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().
|
||||
Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
|
||||
certManager := autocert.Manager{
|
||||
Prompt: autocert.AcceptTOS,
|
||||
HostPolicy: autocert.HostWhitelist(h.cfg.TLSLetsEncryptHostname),
|
||||
Cache: autocert.DirCache(h.cfg.TLSLetsEncryptCacheDir),
|
||||
Client: &acme.Client{
|
||||
DirectoryURL: h.cfg.ACMEURL,
|
||||
},
|
||||
Email: h.cfg.ACMEEmail,
|
||||
}
|
||||
s := &http.Server{
|
||||
Addr: h.cfg.Addr,
|
||||
TLSConfig: m.TLSConfig(),
|
||||
Handler: r,
|
||||
ReadTimeout: timeout,
|
||||
WriteTimeout: timeout,
|
||||
}
|
||||
if h.cfg.TLSLetsEncryptChallengeType == "TLS-ALPN-01" {
|
||||
|
||||
switch h.cfg.TLSLetsEncryptChallengeType {
|
||||
case "TLS-ALPN-01":
|
||||
// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
|
||||
// The RFC requires that the validation is done on port 443; in other words, headscale
|
||||
// must be reachable on port 443.
|
||||
err = s.ListenAndServeTLS("", "")
|
||||
} else if h.cfg.TLSLetsEncryptChallengeType == "HTTP-01" {
|
||||
return certManager.TLSConfig(), nil
|
||||
|
||||
case "HTTP-01":
|
||||
// Configuration via autocert with HTTP-01. This requires listening on
|
||||
// port 80 for the certificate validation in addition to the headscale
|
||||
// service, which can be configured to run on any other port.
|
||||
go func() {
|
||||
|
||||
log.Fatal().
|
||||
Err(http.ListenAndServe(h.cfg.TLSLetsEncryptListen, m.HTTPHandler(http.HandlerFunc(h.redirect)))).
|
||||
Caller().
|
||||
Err(http.ListenAndServe(h.cfg.TLSLetsEncryptListen, certManager.HTTPHandler(http.HandlerFunc(h.redirect)))).
|
||||
Msg("failed to set up a HTTP server")
|
||||
}()
|
||||
err = s.ListenAndServeTLS("", "")
|
||||
} else {
|
||||
return errors.New("unknown value for TLSLetsEncryptChallengeType")
|
||||
|
||||
return certManager.TLSConfig(), nil
|
||||
|
||||
default:
|
||||
return nil, errUnsupportedLetsEncryptChallengeType
|
||||
}
|
||||
} else if h.cfg.TLSCertPath == "" {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
|
||||
log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
|
||||
}
|
||||
err = s.ListenAndServe()
|
||||
|
||||
return nil, err
|
||||
} else {
|
||||
if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
|
||||
log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
|
||||
}
|
||||
err = s.ListenAndServeTLS(h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
|
||||
tlsConfig := &tls.Config{
|
||||
ClientAuth: tls.RequireAnyClientCert,
|
||||
NextProtos: []string{"http/1.1"},
|
||||
Certificates: make([]tls.Certificate, 1),
|
||||
MinVersion: tls.VersionTLS12,
|
||||
}
|
||||
tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(h.cfg.TLSCertPath, h.cfg.TLSKeyPath)
|
||||
|
||||
return tlsConfig, err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *Headscale) setLastStateChangeToNow(namespace string) {
	now := time.Now().UTC()
	lastStateUpdate.WithLabelValues("", "headscale").Set(float64(now.Unix()))
	h.lastStateChange.Store(namespace, now)
}

func (h *Headscale) getLastStateChange(namespace string) time.Time {
	if wrapped, ok := h.lastStateChange.Load(namespace); ok {
		lastChange, _ := wrapped.(time.Time)
		return lastChange
func (h *Headscale) getLastStateChange(namespaces ...string) time.Time {
	times := []time.Time{}

	for _, namespace := range namespaces {
		if wrapped, ok := h.lastStateChange.Load(namespace); ok {
			lastChange, _ := wrapped.(time.Time)

			times = append(times, lastChange)
		}
	}

	now := time.Now().UTC()
	h.lastStateChange.Store(namespace, now)
	return now
	sort.Slice(times, func(i, j int) bool {
		return times[i].After(times[j])
	})

	log.Trace().Msgf("Latest times %#v", times)

	if len(times) == 0 {
		return time.Now().UTC()
	} else {
		return times[0]
	}
}

func stdoutHandler(ctx *gin.Context) {
	body, _ := io.ReadAll(ctx.Request.Body)

	log.Trace().
		Interface("header", ctx.Request.Header).
		Interface("proto", ctx.Request.Proto).
		Interface("url", ctx.Request.URL).
		Bytes("body", body).
		Msg("Request did not match")
}
func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
	privateKey, err := os.ReadFile(path)
	if errors.Is(err, os.ErrNotExist) {
		log.Info().Str("path", path).Msg("No private key file at path, creating...")

		machineKey := key.NewMachine()

		machineKeyStr, err := machineKey.MarshalText()
		if err != nil {
			return nil, fmt.Errorf(
				"failed to convert private key to string for saving: %w",
				err,
			)
		}
		err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
		if err != nil {
			return nil, fmt.Errorf(
				"failed to save private key to disk: %w",
				err,
			)
		}

		return &machineKey, nil
	} else if err != nil {
		return nil, fmt.Errorf("failed to read private key file: %w", err)
	}

	trimmedPrivateKey := strings.TrimSpace(string(privateKey))
	privateKeyEnsurePrefix := PrivateKeyEnsurePrefix(trimmedPrivateKey)

	var machineKey key.MachinePrivate
	if err = machineKey.UnmarshalText([]byte(privateKeyEnsurePrefix)); err != nil {
		log.Info().
			Str("path", path).
			Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
				"If the key is in WireGuard format, delete the key and restart headscale. " +
				"A new key will automatically be generated. All Tailscale clients will have to be restarted")

		return nil, fmt.Errorf("failed to parse private key: %w", err)
	}

	return &machineKey, nil
}
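readOrCreatePrivateKey stores the machine key in the text format of tailscale.com/types/key, which is why pre-0.12 WireGuard-format keys no longer parse. A small sketch of the generate/marshal/parse round trip, using only the key API already referenced above:

package main

import (
	"fmt"
	"log"

	"tailscale.com/types/key"
)

func main() {
	// Generate a fresh machine key, as readOrCreatePrivateKey does on first run.
	machineKey := key.NewMachine()

	// MarshalText produces the prefixed text form that is written to disk.
	text, err := machineKey.MarshalText()
	if err != nil {
		log.Fatal(err)
	}

	// Parsing it back is what happens on every subsequent start.
	var parsed key.MachinePrivate
	if err := parsed.UnmarshalText(text); err != nil {
		log.Fatal(err)
	}

	fmt.Println(parsed.Public().String())
}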
app_test.go (23 lines)
@@ -5,6 +5,7 @@ import (
	"os"
	"testing"

	"github.com/patrickmn/go-cache"
	"gopkg.in/check.v1"
	"inet.af/netaddr"
)

@@ -17,8 +18,10 @@ var _ = check.Suite(&Suite{})

type Suite struct{}

var tmpDir string
var h Headscale
var (
	tmpDir string
	app Headscale
)

func (s *Suite) SetUpTest(c *check.C) {
	s.ResetDB(c)

@@ -38,21 +41,27 @@ func (s *Suite) ResetDB(c *check.C) {
		c.Fatal(err)
	}
	cfg := Config{
		IPPrefix: netaddr.MustParseIPPrefix("10.27.0.0/23"),
		IPPrefixes: []netaddr.IPPrefix{
			netaddr.MustParseIPPrefix("10.27.0.0/23"),
		},
	}

	h = Headscale{
	app = Headscale{
		cfg: cfg,
		dbType: "sqlite3",
		dbString: tmpDir + "/headscale_test.db",
		requestedExpiryCache: cache.New(
			requestedExpiryCacheExpiration,
			requestedExpiryCacheCleanupInterval,
		),
	}
	err = h.initDB()
	err = app.initDB()
	if err != nil {
		c.Fatal(err)
	}
	db, err := h.openDB()
	db, err := app.openDB()
	if err != nil {
		c.Fatal(err)
	}
	h.db = db
	app.db = db
}
apple_mobileconfig.go (new file, 266 lines)
@@ -0,0 +1,266 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"html/template"
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// AppleMobileConfig shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) AppleMobileConfig(ctx *gin.Context) {
|
||||
appleTemplate := template.Must(template.New("apple").Parse(`
|
||||
<html>
|
||||
<body>
|
||||
<h1>Apple configuration profiles</h1>
|
||||
<p>
|
||||
This page provides <a href="https://support.apple.com/guide/mdm/mdm-overview-mdmbf9e668/web">configuration profiles</a> for the official Tailscale clients for <a href="https://apps.apple.com/us/app/tailscale/id1470499037?ls=1">iOS</a> and <a href="https://apps.apple.com/ca/app/tailscale/id1475387142?mt=12">macOS</a>.
|
||||
</p>
|
||||
<p>
|
||||
The profiles will configure Tailscale.app to use {{.Url}} as its control server.
|
||||
</p>
|
||||
|
||||
<h3>Caution</h3>
|
||||
<p>You should always inspect the profile before installing it:</p>
|
||||
<!--
|
||||
<p><code>curl {{.Url}}/apple/ios</code></p>
|
||||
-->
|
||||
<p><code>curl {{.Url}}/apple/macos</code></p>
|
||||
|
||||
<h2>Profiles</h2>
|
||||
|
||||
<!--
|
||||
<h3>iOS</h3>
|
||||
<p>
|
||||
<a href="/apple/ios" download="headscale_ios.mobileconfig">iOS profile</a>
|
||||
</p>
|
||||
-->
|
||||
|
||||
<h3>macOS</h3>
|
||||
<p>Headscale can be set to the default server by installing a Headscale configuration profile:</p>
|
||||
<p>
|
||||
<a href="/apple/macos" download="headscale_macos.mobileconfig">macOS profile</a>
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>Download the profile, then open it. When it has been opened, there should be a notification that a profile can be installed</li>
|
||||
<li>Open System Preferences and go to "Profiles"</li>
|
||||
<li>Find and install the Headscale profile</li>
|
||||
<li>Restart Tailscale.app and log in</li>
|
||||
</ol>
|
||||
|
||||
<p>Or</p>
|
||||
<p>Use your terminal to configure the default setting for Tailscale by issuing:</p>
|
||||
<code>defaults write io.tailscale.ipn.macos ControlURL {{.URL}}</code>
|
||||
|
||||
<p>Restart Tailscale.app and log in.</p>
|
||||
|
||||
</body>
|
||||
</html>`))
|
||||
|
||||
config := map[string]interface{}{
|
||||
"URL": h.cfg.ServerURL,
|
||||
}
|
||||
|
||||
var payload bytes.Buffer
|
||||
if err := appleTemplate.Execute(&payload, config); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "AppleMobileConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple index template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple index template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusOK, "text/html; charset=utf-8", payload.Bytes())
|
||||
}
|
||||
|
||||
func (h *Headscale) ApplePlatformConfig(ctx *gin.Context) {
|
||||
platform := ctx.Param("platform")
|
||||
|
||||
id, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Failed to create UUID")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Failed to create UUID"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
contentID, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Failed to create UUID")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Failed to create UUID"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
platformConfig := AppleMobilePlatformConfig{
|
||||
UUID: contentID,
|
||||
URL: h.cfg.ServerURL,
|
||||
}
|
||||
|
||||
var payload bytes.Buffer
|
||||
|
||||
switch platform {
|
||||
case "macos":
|
||||
if err := macosTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple macOS template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple macOS template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
case "ios":
|
||||
if err := iosTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple iOS template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple iOS template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
default:
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Invalid platform, only ios and macos is supported"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
config := AppleMobileConfig{
|
||||
UUID: id,
|
||||
URL: h.cfg.ServerURL,
|
||||
Payload: payload.String(),
|
||||
}
|
||||
|
||||
var content bytes.Buffer
|
||||
if err := commonTemplate.Execute(&content, config); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple platform template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple platform template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"application/x-apple-aspen-config; charset=utf-8",
|
||||
content.Bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
type AppleMobileConfig struct {
|
||||
UUID uuid.UUID
|
||||
URL string
|
||||
Payload string
|
||||
}
|
||||
|
||||
type AppleMobilePlatformConfig struct {
|
||||
UUID uuid.UUID
|
||||
URL string
|
||||
}
|
||||
|
||||
var commonTemplate = template.Must(
|
||||
template.New("mobileconfig").Parse(`<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadDisplayName</key>
|
||||
<string>Headscale</string>
|
||||
<key>PayloadDescription</key>
|
||||
<string>Configure Tailscale login server to: {{.URL}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadRemovalDisallowed</key>
|
||||
<false/>
|
||||
<key>PayloadType</key>
|
||||
<string>Configuration</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadContent</key>
|
||||
<array>
|
||||
{{.Payload}}
|
||||
</array>
|
||||
</dict>
|
||||
</plist>`),
|
||||
)
|
||||
|
||||
var iosTemplate = template.Must(template.New("iosTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.ios</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadEnabled</key>
|
||||
<true/>
|
||||
|
||||
<key>ControlURL</key>
|
||||
<string>{{.URL}}</string>
|
||||
</dict>
|
||||
`))
|
||||
|
||||
var macosTemplate = template.Must(template.New("macosTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.macos</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadEnabled</key>
|
||||
<true/>
|
||||
|
||||
<key>ControlURL</key>
|
||||
<string>{{.URL}}</string>
|
||||
</dict>
|
||||
`))
|
||||
buf.gen.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
|
||||
version: v1
plugins:
  - name: go
    out: gen/go
    opt:
      - paths=source_relative
  - name: go-grpc
    out: gen/go
    opt:
      - paths=source_relative
  - name: grpc-gateway
    out: gen/go
    opt:
      - paths=source_relative
      - generate_unbound_methods=true
  # - name: gorm
  #   out: gen/go
  #   opt:
  #     - paths=source_relative,enums=string,gateway=true
  - name: openapiv2
    out: gen/openapiv2
cli.go (40 lines, file removed)
@@ -1,40 +0,0 @@
|
||||
package headscale

import (
	"errors"

	"gorm.io/gorm"
	"tailscale.com/types/wgkey"
)

// RegisterMachine is executed from the CLI to register a new Machine using its MachineKey
func (h *Headscale) RegisterMachine(key string, namespace string) (*Machine, error) {
	ns, err := h.GetNamespace(namespace)
	if err != nil {
		return nil, err
	}
	mKey, err := wgkey.ParseHex(key)
	if err != nil {
		return nil, err
	}

	m := Machine{}
	if result := h.db.First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
		return nil, errors.New("Machine not found")
	}

	if m.isAlreadyRegistered() {
		return nil, errors.New("Machine already registered")
	}

	ip, err := h.getAvailableIP()
	if err != nil {
		return nil, err
	}
	m.IPAddress = ip.String()
	m.NamespaceID = ns.ID
	m.Registered = true
	m.RegisterMethod = "cli"
	h.db.Save(&m)
	return &m, nil
}
30
cli_test.go
30
cli_test.go
@@ -1,31 +1,41 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
func (s *Suite) TestRegisterMachine(c *check.C) {
|
||||
n, err := h.CreateNamespace("test")
|
||||
namespace, err := app.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m := Machine{
|
||||
now := time.Now().UTC()
|
||||
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
IPAddress: "10.0.0.1",
|
||||
NamespaceID: namespace.ID,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("10.0.0.1")},
|
||||
Expiry: &now,
|
||||
}
|
||||
h.db.Save(&m)
|
||||
|
||||
_, err = h.GetMachine("test", "testmachine")
|
||||
err = app.db.Save(&machine).Error
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m2, err := h.RegisterMachine("8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e", n.Name)
|
||||
_, err = app.GetMachine(namespace.Name, machine.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(m2.Registered, check.Equals, true)
|
||||
|
||||
_, err = m2.GetHostInfo()
|
||||
machineAfterRegistering, err := app.RegisterMachine(
|
||||
machine.MachineKey,
|
||||
namespace.Name,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(machineAfterRegistering.Registered, check.Equals, true)
|
||||
|
||||
_, err = machineAfterRegistering.GetHostInfo()
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
183
cmd/headscale/cli/api_key.go
Normal file
183
cmd/headscale/cli/api_key.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// 90 days.
|
||||
DefaultAPIKeyExpiry = 90 * 24 * time.Hour
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(apiKeysCmd)
|
||||
apiKeysCmd.AddCommand(listAPIKeys)
|
||||
|
||||
createAPIKeyCmd.Flags().
|
||||
DurationP("expiration", "e", DefaultAPIKeyExpiry, "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
|
||||
apiKeysCmd.AddCommand(createAPIKeyCmd)
|
||||
|
||||
expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
|
||||
err := expireAPIKeyCmd.MarkFlagRequired("prefix")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
apiKeysCmd.AddCommand(expireAPIKeyCmd)
|
||||
}
|
||||
|
||||
var apiKeysCmd = &cobra.Command{
|
||||
Use: "apikeys",
|
||||
Short: "Handle the Api keys in Headscale",
|
||||
}
|
||||
|
||||
var listAPIKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the Api keys for headscale",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListApiKeysRequest{}
|
||||
|
||||
response, err := client.ListApiKeys(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.ApiKeys, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Prefix", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.ApiKeys {
|
||||
expiration := "-"
|
||||
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.Expiration.AsTime())
|
||||
}
|
||||
|
||||
tableData = append(tableData, []string{
|
||||
strconv.FormatUint(key.GetId(), headscale.Base10),
|
||||
key.GetPrefix(),
|
||||
expiration,
|
||||
key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var createAPIKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a new Api key",
|
||||
Long: `
|
||||
Creates a new Api key, the Api key is only visible on creation
|
||||
and cannot be retrieved again.
|
||||
If you loose a key, create a new one and revoke (expire) the old one.`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
log.Trace().
|
||||
Msg("Preparing to create ApiKey")
|
||||
|
||||
request := &v1.CreateApiKeyRequest{}
|
||||
|
||||
duration, _ := cmd.Flags().GetDuration("expiration")
|
||||
expiration := time.Now().UTC().Add(duration)
|
||||
|
||||
log.Trace().Dur("expiration", duration).Msg("expiration has been set")
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.CreateApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.ApiKey, response.ApiKey, output)
|
||||
},
|
||||
}
|
||||
|
||||
var expireAPIKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire an ApiKey",
|
||||
Aliases: []string{"revoke"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
prefix, err := cmd.Flags().GetString("prefix")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireApiKeyRequest{
|
||||
Prefix: prefix,
|
||||
}
|
||||
|
||||
response, err := client.ExpireApiKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Api Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
}
|
||||
132
cmd/headscale/cli/debug.go
Normal file
132
cmd/headscale/cli/debug.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
keyLength = 64
|
||||
errPreAuthKeyTooShort = Error("key too short, must be 64 hexadecimal characters")
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(debugCmd)
|
||||
|
||||
createNodeCmd.Flags().StringP("name", "", "", "Name")
|
||||
err := createNodeCmd.MarkFlagRequired("name")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = createNodeCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = createNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to advertise")
|
||||
|
||||
debugCmd.AddCommand(createNodeCmd)
|
||||
}
|
||||
|
||||
var debugCmd = &cobra.Command{
|
||||
Use: "debug",
|
||||
Short: "debug and testing commands",
|
||||
Long: "debug contains extra commands used for debugging and testing headscale",
|
||||
}
|
||||
|
||||
var createNodeCmd = &cobra.Command{
|
||||
Use: "create-node",
|
||||
Short: "Create a node (machine) that can be registered with `nodes register <>` command",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
name, err := cmd.Flags().GetString("name")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if len(machineKey) != keyLength {
|
||||
err = errPreAuthKeyTooShort
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
request := &v1.DebugCreateMachineRequest{
|
||||
Key: machineKey,
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
response, err := client.DebugCreateMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create machine: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine created", output)
|
||||
},
|
||||
}
|
||||
41
cmd/headscale/cli/generate.go
Normal file
41
cmd/headscale/cli/generate.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(generateCmd)
|
||||
generateCmd.AddCommand(generatePrivateKeyCmd)
|
||||
}
|
||||
|
||||
var generateCmd = &cobra.Command{
|
||||
Use: "generate",
|
||||
Short: "Generate commands",
|
||||
}
|
||||
|
||||
var generatePrivateKeyCmd = &cobra.Command{
|
||||
Use: "private-key",
|
||||
Short: "Generate a private key for the headscale server",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(map[string]string{
|
||||
"private_key": string(machineKeyStr),
|
||||
},
|
||||
string(machineKeyStr), output)
|
||||
},
|
||||
}
|
||||
@@ -2,12 +2,14 @@ package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -15,8 +17,13 @@ func init() {
|
||||
namespaceCmd.AddCommand(createNamespaceCmd)
|
||||
namespaceCmd.AddCommand(listNamespacesCmd)
|
||||
namespaceCmd.AddCommand(destroyNamespaceCmd)
|
||||
namespaceCmd.AddCommand(renameNamespaceCmd)
|
||||
}
|
||||
|
||||
const (
|
||||
errMissingParameter = headscale.Error("missing parameters")
|
||||
)
|
||||
|
||||
var namespaceCmd = &cobra.Command{
|
||||
Use: "namespaces",
|
||||
Short: "Manage the namespaces of Headscale",
|
||||
@@ -27,26 +34,40 @@ var createNamespaceCmd = &cobra.Command{
|
||||
Short: "Creates a new namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
h, err := getHeadscaleApp()
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
namespaceName := args[0]
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
|
||||
|
||||
request := &v1.CreateNamespaceRequest{Name: namespaceName}
|
||||
|
||||
log.Trace().Interface("request", request).Msg("Sending CreateNamespace request")
|
||||
response, err := client.CreateNamespace(ctx, request)
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
namespace, err := h.CreateNamespace(args[0])
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(namespace, err, o)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot create namespace: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Error creating namespace: %s\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("Namespace created\n")
|
||||
|
||||
SuccessOutput(response.Namespace, "Namespace created", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -55,26 +76,70 @@ var destroyNamespaceCmd = &cobra.Command{
|
||||
Short: "Destroys a namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
namespaceName := args[0]
|
||||
|
||||
request := &v1.GetNamespaceRequest{
|
||||
Name: namespaceName,
|
||||
}
|
||||
err = h.DestroyNamespace(args[0])
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(map[string]string{"Result": "Namespace destroyed"}, err, o)
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
_, err := client.GetNamespace(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Error destroying namespace: %s\n", err)
|
||||
return
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the namespace '%s' and any associated preauthkeys?",
|
||||
namespaceName,
|
||||
),
|
||||
}
|
||||
err := survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
request := &v1.DeleteNamespaceRequest{Name: namespaceName}
|
||||
|
||||
response, err := client.DeleteNamespace(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot destroy namespace: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
SuccessOutput(response, "Namespace destroyed", output)
|
||||
} else {
|
||||
SuccessOutput(map[string]string{"Result": "Namespace not destroyed"}, "Namespace not destroyed", output)
|
||||
}
|
||||
fmt.Printf("Namespace destroyed\n")
|
||||
},
|
||||
}
|
||||
|
||||
@@ -82,28 +147,92 @@ var listNamespacesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the namespaces",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
h, err := getHeadscaleApp()
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListNamespacesRequest{}
|
||||
|
||||
response, err := client.ListNamespaces(ctx, request)
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(namespaces, err, o)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get namespaces: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
d := pterm.TableData{{"ID", "Name", "Created"}}
|
||||
for _, n := range *namespaces {
|
||||
d = append(d, []string{strconv.FormatUint(uint64(n.ID), 10), n.Name, n.CreatedAt.Format("2006-01-02 15:04:05")})
|
||||
if output != "" {
|
||||
SuccessOutput(response.Namespaces, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
|
||||
|
||||
tableData := pterm.TableData{{"ID", "Name", "Created"}}
|
||||
for _, namespace := range response.GetNamespaces() {
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
namespace.GetId(),
|
||||
namespace.GetName(),
|
||||
namespace.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
},
|
||||
)
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var renameNamespaceCmd = &cobra.Command{
|
||||
Use: "rename OLD_NAME NEW_NAME",
|
||||
Short: "Renames a namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
expectedArguments := 2
|
||||
if len(args) < expectedArguments {
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.RenameNamespaceRequest{
|
||||
OldName: args[0],
|
||||
NewName: args[1],
|
||||
}
|
||||
|
||||
response, err := client.RenameNamespace(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename namespace: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Namespace, "Namespace renamed", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -9,23 +9,67 @@ import (
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
nodeCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := nodeCmd.MarkPersistentFlagRequired("namespace")
|
||||
listNodesCmd.Flags().StringP("namespace", "n", "", "Filter by namespace")
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
registerNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err := registerNodeCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
registerNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = registerNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
|
||||
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = expireNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
shareMachineCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = shareMachineCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
shareMachineCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = shareMachineCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(shareMachineCmd)
|
||||
|
||||
unshareMachineCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = unshareMachineCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
unshareMachineCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = unshareMachineCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(unshareMachineCmd)
|
||||
}
|
||||
|
||||
var nodeCmd = &cobra.Command{
|
||||
@@ -34,223 +78,460 @@ var nodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var registerNodeCmd = &cobra.Command{
|
||||
Use: "register machineID",
|
||||
Use: "register",
|
||||
Short: "Registers a machine to your network",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
m, err := h.RegisterMachine(args[0], n)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(m, err, o)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
fmt.Printf("Cannot register machine: %s\n", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Printf("Machine registered\n")
|
||||
|
||||
request := &v1.RegisterMachineRequest{
|
||||
Key: machineKey,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
response, err := client.RegisterMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine register", output)
|
||||
},
|
||||
}
|
||||
|
||||
var listNodesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the nodes in a given namespace",
|
||||
Short: "List nodes",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
|
||||
namespace, err := h.GetNamespace(n)
|
||||
if err != nil {
|
||||
log.Fatalf("Error fetching namespace: %s", err)
|
||||
}
|
||||
|
||||
machines, err := h.ListMachinesInNamespace(n)
|
||||
if err != nil {
|
||||
log.Fatalf("Error fetching machines: %s", err)
|
||||
}
|
||||
|
||||
sharedMachines, err := h.ListSharedMachinesInNamespace(n)
|
||||
if err != nil {
|
||||
log.Fatalf("Error fetching shared machines: %s", err)
|
||||
}
|
||||
|
||||
allMachines := append(*machines, *sharedMachines...)
|
||||
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(allMachines, err, o)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting nodes: %s", err)
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListMachinesRequest{
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
d, err := nodesToPtables(*namespace, allMachines)
|
||||
response, err := client.ListMachines(ctx, request)
|
||||
if err != nil {
|
||||
log.Fatalf("Error converting to table: %s", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
if output != "" {
|
||||
SuccessOutput(response.Machines, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData, err := nodesToPtables(namespace, response.Machines)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var expireNodeCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire (log out) a machine in your network",
|
||||
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
|
||||
Aliases: []string{"logout"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
response, err := client.ExpireMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot expire machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
var deleteNodeCmd = &cobra.Command{
|
||||
Use: "delete ID",
|
||||
Use: "delete",
|
||||
Short: "Delete a node",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
h, err := getHeadscaleApp()
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
id, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Error converting ID to integer: %s", err)
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
getRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
m, err := h.GetMachineByID(uint64(id))
|
||||
|
||||
getResponse, err := client.GetMachine(ctx, getRequest)
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting node: %s", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error getting node node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
deleteRequest := &v1.DeleteMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
confirm := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf("Do you want to remove the node %s?", m.Name),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetMachine().Name,
|
||||
),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if confirm {
|
||||
err = h.DeleteMachine(m)
|
||||
if err != nil {
|
||||
log.Fatalf("Error deleting node: %s", err)
|
||||
if confirm || force {
|
||||
response, err := client.DeleteMachine(ctx, deleteRequest)
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Printf("Node deleted\n")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error deleting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
SuccessOutput(
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
"Node deleted",
|
||||
output,
|
||||
)
|
||||
} else {
|
||||
fmt.Printf("Node not deleted\n")
|
||||
SuccessOutput(map[string]string{"Result": "Node not deleted"}, "Node not deleted", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func sharingWorker(
|
||||
cmd *cobra.Command,
|
||||
) (string, *v1.Machine, *v1.Namespace, error) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespaceStr, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting ID to integer: %s", err), output)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
machineRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
machineResponse, err := client.GetMachine(ctx, machineRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
namespaceRequest := &v1.GetNamespaceRequest{
|
||||
Name: namespaceStr,
|
||||
}
|
||||
|
||||
namespaceResponse, err := client.GetNamespace(ctx, namespaceRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
return output, machineResponse.GetMachine(), namespaceResponse.GetNamespace(), nil
|
||||
}
|
||||
|
||||
var shareMachineCmd = &cobra.Command{
|
||||
Use: "share ID namespace",
|
||||
Use: "share",
|
||||
Short: "Shares a node from the current namespace to the specified one",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return fmt.Errorf("missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
output, machine, namespace, err := sharingWorker(cmd)
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to fetch namespace or machine: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
|
||||
_, err = h.GetNamespace(namespace)
|
||||
if err != nil {
|
||||
log.Fatalf("Error fetching origin namespace: %s", err)
|
||||
}
|
||||
|
||||
destinationNamespace, err := h.GetNamespace(args[1])
|
||||
if err != nil {
|
||||
log.Fatalf("Error fetching destination namespace: %s", err)
|
||||
}
|
||||
|
||||
id, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Error converting ID to integer: %s", err)
|
||||
}
|
||||
machine, err := h.GetMachineByID(uint64(id))
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting node: %s", err)
|
||||
}
|
||||
|
||||
err = h.AddSharedMachineToNamespace(machine, destinationNamespace)
|
||||
if strings.HasPrefix(output, "json") {
|
||||
JsonOutput(map[string]string{"Result": "Node shared"}, err, output)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Error sharing node: %s\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Node shared!")
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ShareMachineRequest{
|
||||
MachineId: machine.Id,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
|
||||
response, err := client.ShareMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error sharing node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Node shared", output)
|
||||
},
|
||||
}
|
||||
|
||||
func nodesToPtables(currentNamespace headscale.Namespace, machines []headscale.Machine) (pterm.TableData, error) {
|
||||
d := pterm.TableData{{"ID", "Name", "NodeKey", "Namespace", "IP address", "Ephemeral", "Last seen", "Online"}}
|
||||
var unshareMachineCmd = &cobra.Command{
|
||||
Use: "unshare",
|
||||
Short: "Unshares a node from the specified namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, machine, namespace, err := sharingWorker(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to fetch namespace or machine: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.UnshareMachineRequest{
|
||||
MachineId: machine.Id,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
|
||||
response, err := client.UnshareMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error unsharing node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Node unshared", output)
|
||||
},
|
||||
}
|
||||
|
||||
func nodesToPtables(
|
||||
currentNamespace string,
|
||||
machines []*v1.Machine,
|
||||
) (pterm.TableData, error) {
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Name",
|
||||
"NodeKey",
|
||||
"Namespace",
|
||||
"IP addresses",
|
||||
"Ephemeral",
|
||||
"Last seen",
|
||||
"Online",
|
||||
"Expired",
|
||||
},
|
||||
}
|
||||
|
||||
for _, machine := range machines {
|
||||
var ephemeral bool
|
||||
if machine.AuthKey != nil && machine.AuthKey.Ephemeral {
|
||||
if machine.PreAuthKey != nil && machine.PreAuthKey.Ephemeral {
|
||||
ephemeral = true
|
||||
}
|
||||
|
||||
var lastSeen time.Time
|
||||
var lastSeenTime string
|
||||
if machine.LastSeen != nil {
|
||||
lastSeen = *machine.LastSeen
|
||||
lastSeen = machine.LastSeen.AsTime()
|
||||
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
nKey, err := wgkey.ParseHex(machine.NodeKey)
|
||||
|
||||
var expiry time.Time
|
||||
if machine.Expiry != nil {
|
||||
expiry = machine.Expiry.AsTime()
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(headscale.NodePublicKeyEnsurePrefix(machine.NodeKey)),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
nodeKey := tailcfg.NodeKey(nKey)
|
||||
|
||||
var online string
|
||||
if lastSeen.After(time.Now().Add(-5 * time.Minute)) { // TODO: Find a better way to reliably show if online
|
||||
online = pterm.LightGreen("true")
|
||||
if lastSeen.After(
|
||||
time.Now().Add(-5 * time.Minute),
|
||||
) { // TODO: Find a better way to reliably show if online
|
||||
online = pterm.LightGreen("online")
|
||||
} else {
|
||||
online = pterm.LightRed("false")
|
||||
online = pterm.LightRed("offline")
|
||||
}
|
||||
|
||||
var expired string
|
||||
if expiry.IsZero() || expiry.After(time.Now()) {
|
||||
expired = pterm.LightGreen("no")
|
||||
} else {
|
||||
expired = pterm.LightRed("yes")
|
||||
}
|
||||
|
||||
var namespace string
|
||||
if currentNamespace.ID == machine.NamespaceID {
|
||||
if currentNamespace == "" || (currentNamespace == machine.Namespace.Name) {
|
||||
namespace = pterm.LightMagenta(machine.Namespace.Name)
|
||||
} else {
|
||||
// Shared into this namespace
|
||||
namespace = pterm.LightYellow(machine.Namespace.Name)
|
||||
}
|
||||
d = append(d, []string{strconv.FormatUint(machine.ID, 10), machine.Name, nodeKey.ShortString(), namespace, machine.IPAddress, strconv.FormatBool(ephemeral), lastSeenTime, online})
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(machine.Id, headscale.Base10),
|
||||
machine.Name,
|
||||
nodeKey.ShortString(),
|
||||
namespace,
|
||||
strings.Join(machine.IpAddresses, ", "),
|
||||
strconv.FormatBool(ephemeral),
|
||||
lastSeenTime,
|
||||
online,
|
||||
expired,
|
||||
},
|
||||
)
|
||||
}
|
||||
return d, nil
|
||||
|
||||
return tableData, nil
|
||||
}
|
||||
|
||||
@@ -2,14 +2,18 @@ package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hako/durafmt"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultPreAuthKeyExpiry = 1 * time.Hour
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -17,14 +21,17 @@ func init() {
|
||||
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := preauthkeysCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
preauthkeysCmd.AddCommand(listPreAuthKeys)
|
||||
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
|
||||
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
|
||||
createPreAuthKeyCmd.PersistentFlags().Bool("reusable", false, "Make the preauthkey reusable")
|
||||
createPreAuthKeyCmd.PersistentFlags().Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().StringP("expiration", "e", "", "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
createPreAuthKeyCmd.PersistentFlags().
|
||||
Bool("reusable", false, "Make the preauthkey reusable")
|
||||
createPreAuthKeyCmd.PersistentFlags().
|
||||
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().
|
||||
DurationP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
}
|
||||
|
||||
var preauthkeysCmd = &cobra.Command{
|
||||
@@ -36,54 +43,76 @@ var listPreAuthKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the preauthkeys for this namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
keys, err := h.GetPreAuthKeys(n)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(keys, err, o)
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListPreAuthKeysRequest{
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
response, err := client.ListPreAuthKeys(ctx, request)
|
||||
if err != nil {
|
||||
fmt.Printf("Error getting the list of keys: %s\n", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
d := pterm.TableData{{"ID", "Key", "Reusable", "Ephemeral", "Expiration", "Created"}}
|
||||
for _, k := range *keys {
|
||||
if output != "" {
|
||||
SuccessOutput(response.PreAuthKeys, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Key", "Reusable", "Ephemeral", "Used", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.PreAuthKeys {
|
||||
expiration := "-"
|
||||
if k.Expiration != nil {
|
||||
expiration = k.Expiration.Format("2006-01-02 15:04:05")
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = ColourTime(key.Expiration.AsTime())
|
||||
}
|
||||
|
||||
var reusable string
|
||||
if k.Ephemeral {
|
||||
if key.GetEphemeral() {
|
||||
reusable = "N/A"
|
||||
} else {
|
||||
reusable = fmt.Sprintf("%v", k.Reusable)
|
||||
reusable = fmt.Sprintf("%v", key.GetReusable())
|
||||
}
|
||||
|
||||
d = append(d, []string{
|
||||
strconv.FormatUint(k.ID, 10),
|
||||
k.Key,
|
||||
tableData = append(tableData, []string{
|
||||
key.GetId(),
|
||||
key.GetKey(),
|
||||
reusable,
|
||||
strconv.FormatBool(k.Ephemeral),
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
k.CreatedAt.Format("2006-01-02 15:04:05"),
|
||||
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -92,78 +121,95 @@ var createPreAuthKeyCmd = &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a new preauthkey in the specified namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
reusable, _ := cmd.Flags().GetBool("reusable")
|
||||
ephemeral, _ := cmd.Flags().GetBool("ephemeral")
|
||||
|
||||
e, _ := cmd.Flags().GetString("expiration")
|
||||
var expiration *time.Time
|
||||
if e != "" {
|
||||
duration, err := durafmt.ParseStringShort(e)
|
||||
if err != nil {
|
||||
log.Fatalf("Error parsing expiration: %s", err)
|
||||
}
|
||||
exp := time.Now().UTC().Add(duration.Duration())
|
||||
expiration = &exp
|
||||
log.Trace().
|
||||
Bool("reusable", reusable).
|
||||
Bool("ephemeral", ephemeral).
|
||||
Str("namespace", namespace).
|
||||
Msg("Preparing to create preauthkey")
|
||||
|
||||
request := &v1.CreatePreAuthKeyRequest{
|
||||
Namespace: namespace,
|
||||
Reusable: reusable,
|
||||
Ephemeral: ephemeral,
|
||||
}
|
||||
|
||||
k, err := h.CreatePreAuthKey(n, reusable, ephemeral, expiration)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(k, err, o)
|
||||
return
|
||||
}
|
||||
duration, _ := cmd.Flags().GetDuration("expiration")
|
||||
expiration := time.Now().UTC().Add(duration)
|
||||
|
||||
log.Trace().Dur("expiration", duration).Msg("expiration has been set")
|
||||
|
||||
request.Expiration = timestamppb.New(expiration)
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
response, err := client.CreatePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Printf("%s\n", k.Key)
|
||||
|
||||
SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
|
||||
},
|
||||
}
|
||||
|
||||
var expirePreAuthKeyCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Use: "expire KEY",
|
||||
Short: "Expire a preauthkey",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("missing parameters")
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
|
||||
k, err := h.GetPreAuthKey(n, args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting the key: %s", err)
|
||||
}
|
||||
|
||||
err = h.MarkExpirePreAuthKey(k)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(k, err, o)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpirePreAuthKeyRequest{
|
||||
Namespace: namespace,
|
||||
Key: args[0],
|
||||
}
|
||||
|
||||
response, err := client.ExpirePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Println("Expired")
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
19
cmd/headscale/cli/pterm_style.go
Normal file
19
cmd/headscale/cli/pterm_style.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pterm/pterm"
|
||||
)
|
||||
|
||||
func ColourTime(date time.Time) string {
|
||||
dateStr := date.Format("2006-01-02 15:04:05")
|
||||
|
||||
if date.After(time.Now()) {
|
||||
dateStr = pterm.LightGreen(dateStr)
|
||||
} else {
|
||||
dateStr = pterm.LightRed(dateStr)
|
||||
}
|
||||
|
||||
return dateStr
|
||||
}
|
||||
@@ -8,7 +8,10 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json' or 'json-line'")
|
||||
rootCmd.PersistentFlags().
|
||||
StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
|
||||
rootCmd.PersistentFlags().
|
||||
Bool("force", false, "Disable prompts and forces the execution")
|
||||
}
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
|
||||
@@ -3,24 +3,35 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"strconv"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(routesCmd)
|
||||
routesCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := routesCmd.MarkPersistentFlagRequired("namespace")
|
||||
|
||||
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err := listRoutesCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
|
||||
enableRouteCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to enable")
|
||||
enableRouteCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = enableRouteCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
enableRouteCmd.Flags().BoolP("all", "a", false, "Enable all routes advertised by the node")
|
||||
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
routesCmd.AddCommand(enableRouteCmd)
|
||||
|
||||
nodeCmd.AddCommand(routesCmd)
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
@@ -29,119 +40,168 @@ var routesCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var listRoutesCmd = &cobra.Command{
|
||||
Use: "list NODE",
|
||||
Short: "List the routes exposed by this node",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Use: "list",
|
||||
Short: "List routes advertised and enabled by a given node",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
availableRoutes, err := h.GetAdvertisedNodeRoutes(n, args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(o, "json") {
|
||||
// TODO: Add enable/disabled information to this interface
|
||||
JsonOutput(availableRoutes, err, o)
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.GetMachineRouteRequest{
|
||||
MachineId: machineID,
|
||||
}
|
||||
|
||||
response, err := client.GetMachineRoute(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
d := h.RoutesToPtables(n, args[0], *availableRoutes)
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(d).Render()
|
||||
return
|
||||
}
|
||||
|
||||
tableData := routesToPtables(response.Routes)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var enableRouteCmd = &cobra.Command{
|
||||
Use: "enable node-name route",
|
||||
Short: "Allows exposing a route declared by this node to the rest of the nodes",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
all, err := cmd.Flags().GetBool("all")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
|
||||
if all {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
if len(args) < 2 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
},
|
||||
Use: "enable",
|
||||
Short: "Set the enabled routes for a given node",
|
||||
Long: `This command will take a list of routes that will _replace_
|
||||
the current set of routes on a given node.
|
||||
If you would like to disable a route, simply run the command again, but
|
||||
omit the route you do not want to enable.
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
|
||||
all, err := cmd.Flags().GetBool("all")
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.EnableMachineRoutesRequest{
|
||||
MachineId: machineID,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
if all {
|
||||
availableRoutes, err := h.GetAdvertisedNodeRoutes(n, args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
response, err := client.EnableMachineRoutes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
for _, availableRoute := range *availableRoutes {
|
||||
err = h.EnableNodeRoute(n, args[0], availableRoute.String())
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(availableRoute, err, o)
|
||||
} else {
|
||||
fmt.Printf("Enabled route %s\n", availableRoute)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = h.EnableNodeRoute(n, args[0], args[1])
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(args[1], err, o)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("Enabled route %s\n", args[1])
|
||||
tableData := routesToPtables(response.Routes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// routesToPtables converts the list of routes to a nice table.
|
||||
func routesToPtables(routes *v1.Routes) pterm.TableData {
|
||||
tableData := pterm.TableData{{"Route", "Enabled"}}
|
||||
|
||||
for _, route := range routes.GetAdvertisedRoutes() {
|
||||
enabled := isStringInSlice(routes.EnabledRoutes, route)
|
||||
|
||||
tableData = append(tableData, []string{route, strconv.FormatBool(enabled)})
|
||||
}
|
||||
|
||||
return tableData
|
||||
}
|
||||
|
||||
func isStringInSlice(strs []string, s string) bool {
|
||||
for _, s2 := range strs {
|
||||
if s == s2 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
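// Illustrative sketch (not part of the diff above): how routesToPtables and
// pterm are expected to be combined. The v1.Routes fields set here
// (AdvertisedRoutes, EnabledRoutes) are the ones routesToPtables reads; the
// example function name and route values are hypothetical.
func exampleRenderRoutesTable() error {
	routes := &v1.Routes{
		AdvertisedRoutes: []string{"10.0.0.0/24", "0.0.0.0/0"},
		EnabledRoutes:    []string{"10.0.0.0/24"},
	}

	// Renders:
	//   Route       | Enabled
	//   10.0.0.0/24 | true
	//   0.0.0.0/0   | false
	return pterm.DefaultTable.WithHasHeader().WithData(routesToPtables(routes)).Render()
}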
|
||||
|
||||
@@ -1,26 +1,37 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"gopkg.in/yaml.v2"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
type ErrorOutput struct {
|
||||
Error string
|
||||
}
|
||||
const (
|
||||
PermissionFallback = 0o700
|
||||
HeadscaleDateTimeFormat = "2006-01-02 15:04:05"
|
||||
)
|
||||
|
||||
func LoadConfig(path string) error {
|
||||
viper.SetConfigName("config")
|
||||
@@ -32,50 +43,93 @@ func LoadConfig(path string) error {
|
||||
// For testing
|
||||
viper.AddConfigPath(path)
|
||||
}
|
||||
|
||||
viper.SetEnvPrefix("headscale")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", "HTTP-01")
|
||||
|
||||
viper.SetDefault("ip_prefix", "100.64.0.0/10")
|
||||
|
||||
viper.SetDefault("log_level", "info")
|
||||
|
||||
viper.SetDefault("dns_config", nil)
|
||||
|
||||
err := viper.ReadInConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Fatal error reading config file: %s \n", err)
|
||||
viper.SetDefault("unix_socket", "/var/run/headscale.sock")
|
||||
viper.SetDefault("unix_socket_permission", "0o770")
|
||||
|
||||
viper.SetDefault("grpc_listen_addr", ":50443")
|
||||
viper.SetDefault("grpc_allow_insecure", false)
|
||||
|
||||
viper.SetDefault("cli.timeout", "5s")
|
||||
viper.SetDefault("cli.insecure", false)
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return fmt.Errorf("fatal error reading config file: %w", err)
|
||||
}
|
||||
|
||||
// Collect any validation errors and return them all at once
|
||||
var errorText string
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") && ((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") && (viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") && (!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") &&
|
||||
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
|
||||
log.Warn().
|
||||
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") && (viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
|
||||
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") && !strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") &&
|
||||
!strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
errorText += "Fatal config error: server_url must start with https:// or http://\n"
|
||||
}
|
||||
if errorText != "" {
|
||||
//nolint
|
||||
return errors.New(strings.TrimSuffix(errorText, "\n"))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func GetDNSConfig() *tailcfg.DNSConfig {
|
||||
func GetDERPConfig() headscale.DERPConfig {
|
||||
urlStrs := viper.GetStringSlice("derp.urls")
|
||||
|
||||
urls := make([]url.URL, len(urlStrs))
|
||||
for index, urlStr := range urlStrs {
|
||||
urlAddr, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("url", urlStr).
|
||||
Err(err).
|
||||
Msg("Failed to parse url, ignoring...")
|
||||
}
|
||||
|
||||
urls[index] = *urlAddr
|
||||
}
|
||||
|
||||
paths := viper.GetStringSlice("derp.paths")
|
||||
|
||||
autoUpdate := viper.GetBool("derp.auto_update_enabled")
|
||||
updateFrequency := viper.GetDuration("derp.update_frequency")
|
||||
|
||||
return headscale.DERPConfig{
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
if viper.IsSet("dns_config") {
|
||||
dnsConfig := &tailcfg.DNSConfig{}
|
||||
|
||||
@@ -83,7 +137,7 @@ func GetDNSConfig() *tailcfg.DNSConfig {
|
||||
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
|
||||
|
||||
nameservers := make([]netaddr.IP, len(nameserversStr))
|
||||
resolvers := make([]tailcfg.DNSResolver, len(nameserversStr))
|
||||
resolvers := make([]dnstype.Resolver, len(nameserversStr))
|
||||
|
||||
for index, nameserverStr := range nameserversStr {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
@@ -95,7 +149,7 @@ func GetDNSConfig() *tailcfg.DNSConfig {
|
||||
}
|
||||
|
||||
nameservers[index] = nameserver
|
||||
resolvers[index] = tailcfg.DNSResolver{
|
||||
resolvers[index] = dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
@@ -103,14 +157,63 @@ func GetDNSConfig() *tailcfg.DNSConfig {
|
||||
dnsConfig.Nameservers = nameservers
|
||||
dnsConfig.Resolvers = resolvers
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.restricted_nameservers") {
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Routes = make(map[string][]dnstype.Resolver)
|
||||
restrictedDNS := viper.GetStringMapStringSlice(
|
||||
"dns_config.restricted_nameservers",
|
||||
)
|
||||
for domain, restrictedNameservers := range restrictedDNS {
|
||||
restrictedResolvers := make(
|
||||
[]dnstype.Resolver,
|
||||
len(restrictedNameservers),
|
||||
)
|
||||
for index, nameserverStr := range restrictedNameservers {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
restrictedResolvers[index] = dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
dnsConfig.Routes[domain] = restrictedResolvers
|
||||
}
|
||||
} else {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.domains") {
|
||||
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
|
||||
}
|
||||
|
||||
return dnsConfig
|
||||
if viper.IsSet("dns_config.magic_dns") {
|
||||
magicDNS := viper.GetBool("dns_config.magic_dns")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Proxied = magicDNS
|
||||
} else if magicDNS {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
|
||||
}
|
||||
}
|
||||
|
||||
var baseDomain string
|
||||
if viper.IsSet("dns_config.base_domain") {
|
||||
baseDomain = viper.GetString("dns_config.base_domain")
|
||||
} else {
|
||||
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
|
||||
}
|
||||
|
||||
return dnsConfig, baseDomain
|
||||
}
|
||||
|
||||
return nil
|
||||
return nil, ""
|
||||
}
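// Illustrative sketch (not part of the diff above): GetDNSConfig now returns
// the base domain alongside the *tailcfg.DNSConfig, so callers consume both
// values. The example function and variable names are hypothetical.
func exampleUseDNSConfig() {
	dnsConfig, baseDomain := GetDNSConfig()
	if dnsConfig != nil {
		log.Info().
			Str("base_domain", baseDomain).
			Int("nameservers", len(dnsConfig.Nameservers)).
			Msg("DNS config loaded")
	}
}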
|
||||
|
||||
func absPath(path string) string {
|
||||
@@ -122,35 +225,77 @@ func absPath(path string) string {
|
||||
path = filepath.Join(dir, path)
|
||||
}
|
||||
}
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
derpPath := absPath(viper.GetString("derp_map_path"))
|
||||
derpMap, err := loadDerpMap(derpPath)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("path", derpPath).
|
||||
Err(err).
|
||||
Msg("Could not load DERP servers map file")
|
||||
func getHeadscaleConfig() headscale.Config {
|
||||
dnsConfig, baseDomain := GetDNSConfig()
|
||||
derpConfig := GetDERPConfig()
|
||||
|
||||
configuredPrefixes := viper.GetStringSlice("ip_prefixes")
|
||||
parsedPrefixes := make([]netaddr.IPPrefix, 0, len(configuredPrefixes)+1)
|
||||
|
||||
legacyPrefixField := viper.GetString("ip_prefix")
|
||||
if len(legacyPrefixField) > 0 {
|
||||
log.
|
||||
Warn().
|
||||
Msgf(
|
||||
"%s, %s",
|
||||
"use of 'ip_prefix' for configuration is deprecated",
|
||||
"please see 'ip_prefixes' in the shipped example.",
|
||||
)
|
||||
legacyPrefix, err := netaddr.ParseIPPrefix(legacyPrefixField)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefix: %w", err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, legacyPrefix)
|
||||
}
|
||||
|
||||
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
err = fmt.Errorf("ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s\n", viper.GetString("ephemeral_node_inactivity_timeout"), minInactivityTimeout)
|
||||
return nil, err
|
||||
for i, prefixInConfig := range configuredPrefixes {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixInConfig)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, prefix)
|
||||
}
|
||||
|
||||
cfg := headscale.Config{
|
||||
ServerURL: viper.GetString("server_url"),
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
prefixes := make([]netaddr.IPPrefix, 0, len(parsedPrefixes))
|
||||
{
|
||||
// dedup
|
||||
normalizedPrefixes := make(map[string]int, len(parsedPrefixes))
|
||||
for i, p := range parsedPrefixes {
|
||||
normalized, _ := p.Range().Prefix()
|
||||
normalizedPrefixes[normalized.String()] = i
|
||||
}
|
||||
|
||||
// convert back to list
|
||||
for _, i := range normalizedPrefixes {
|
||||
prefixes = append(prefixes, parsedPrefixes[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(prefixes) < 1 {
|
||||
prefixes = append(prefixes, netaddr.MustParseIPPrefix("100.64.0.0/10"))
|
||||
log.Warn().
|
||||
Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
|
||||
}
|
||||
|
||||
return headscale.Config{
|
||||
ServerURL: viper.GetString("server_url"),
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
GRPCAddr: viper.GetString("grpc_listen_addr"),
|
||||
GRPCAllowInsecure: viper.GetBool("grpc_allow_insecure"),
|
||||
|
||||
IPPrefixes: prefixes,
|
||||
PrivateKeyPath: absPath(viper.GetString("private_key_path")),
|
||||
DerpMap: derpMap,
|
||||
IPPrefix: netaddr.MustParseIPPrefix(viper.GetString("ip_prefix")),
|
||||
BaseDomain: baseDomain,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration("ephemeral_node_inactivity_timeout"),
|
||||
DERP: derpConfig,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration(
|
||||
"ephemeral_node_inactivity_timeout",
|
||||
),
|
||||
|
||||
DBtype: viper.GetString("db_type"),
|
||||
DBpath: absPath(viper.GetString("db_path")),
|
||||
@@ -160,18 +305,60 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
DBuser: viper.GetString("db_user"),
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
|
||||
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
|
||||
TLSLetsEncryptCacheDir: absPath(viper.GetString("tls_letsencrypt_cache_dir")),
|
||||
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
|
||||
TLSLetsEncryptCacheDir: absPath(
|
||||
viper.GetString("tls_letsencrypt_cache_dir"),
|
||||
),
|
||||
TLSLetsEncryptChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
|
||||
|
||||
TLSCertPath: absPath(viper.GetString("tls_cert_path")),
|
||||
TLSKeyPath: absPath(viper.GetString("tls_key_path")),
|
||||
|
||||
DNSConfig: GetDNSConfig(),
|
||||
DNSConfig: dnsConfig,
|
||||
|
||||
ACMEEmail: viper.GetString("acme_email"),
|
||||
ACMEURL: viper.GetString("acme_url"),
|
||||
|
||||
UnixSocket: viper.GetString("unix_socket"),
|
||||
UnixSocketPermission: GetFileMode("unix_socket_permission"),
|
||||
|
||||
OIDC: headscale.OIDCConfig{
|
||||
Issuer: viper.GetString("oidc.issuer"),
|
||||
ClientID: viper.GetString("oidc.client_id"),
|
||||
ClientSecret: viper.GetString("oidc.client_secret"),
|
||||
},
|
||||
|
||||
CLI: headscale.CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
Timeout: viper.GetDuration("cli.timeout"),
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
// TODO: Find a better way to return this text
|
||||
//nolint
|
||||
err := fmt.Errorf(
|
||||
"ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s",
|
||||
viper.GetString("ephemeral_node_inactivity_timeout"),
|
||||
minInactivityTimeout,
|
||||
)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h, err := headscale.NewHeadscale(cfg)
|
||||
cfg := getHeadscaleConfig()
|
||||
|
||||
cfg.OIDC.MatchMap = loadOIDCMatchMap()
|
||||
|
||||
app, err := headscale.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -180,7 +367,7 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
|
||||
if viper.GetString("acl_policy_path") != "" {
|
||||
aclPath := absPath(viper.GetString("acl_policy_path"))
|
||||
err = h.LoadACLPolicy(aclPath)
|
||||
err = app.LoadACLPolicy(aclPath)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("path", aclPath).
|
||||
@@ -189,52 +376,163 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return h, nil
|
||||
return app, nil
|
||||
}
|
||||
|
||||
func loadDerpMap(path string) (*tailcfg.DERPMap, error) {
|
||||
derpFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
|
||||
cfg := getHeadscaleConfig()
|
||||
|
||||
log.Debug().
|
||||
Dur("timeout", cfg.CLI.Timeout).
|
||||
Msgf("Setting timeout")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)
|
||||
|
||||
grpcOptions := []grpc.DialOption{
|
||||
grpc.WithBlock(),
|
||||
}
|
||||
defer derpFile.Close()
|
||||
var derpMap tailcfg.DERPMap
|
||||
b, err := io.ReadAll(derpFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
address := cfg.CLI.Address
|
||||
|
||||
// If the address is not set, we assume that we are on the server hosting headscale.
|
||||
if address == "" {
|
||||
log.Debug().
|
||||
Str("socket", cfg.UnixSocket).
|
||||
Msgf("HEADSCALE_CLI_ADDRESS environment is not set, connecting to unix socket.")
|
||||
|
||||
address = cfg.UnixSocket
|
||||
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(headscale.GrpcSocketDialer),
|
||||
)
|
||||
} else {
|
||||
// If we are not connecting to a local server, require an API key for authentication
|
||||
apiKey := cfg.CLI.APIKey
|
||||
if apiKey == "" {
|
||||
log.Fatal().Caller().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
|
||||
}
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithPerRPCCredentials(tokenAuth{
|
||||
token: apiKey,
|
||||
}),
|
||||
)
|
||||
|
||||
if cfg.CLI.Insecure {
|
||||
tlsConfig := &tls.Config{
|
||||
// turn off gosec as we are intentionally setting
|
||||
// insecure.
|
||||
//nolint:gosec
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
|
||||
)
|
||||
} else {
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
|
||||
)
|
||||
}
|
||||
}
|
||||
err = yaml.Unmarshal(b, &derpMap)
|
||||
return &derpMap, err
|
||||
|
||||
log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
|
||||
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
|
||||
if err != nil {
|
||||
log.Fatal().Caller().Err(err).Msgf("Could not connect: %v", err)
|
||||
}
|
||||
|
||||
client := v1.NewHeadscaleServiceClient(conn)
|
||||
|
||||
return ctx, client, conn, cancel
|
||||
}
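// Illustrative sketch (not part of the diff above): the calling pattern the
// commands use with getHeadscaleCLIClient. It assumes the generated
// ListMachines RPC and request/response types from the v1 API; the example
// function name is hypothetical.
func exampleListMachinesOverGRPC() {
	ctx, client, conn, cancel := getHeadscaleCLIClient()
	defer cancel()
	defer conn.Close()

	response, err := client.ListMachines(ctx, &v1.ListMachinesRequest{})
	if err != nil {
		ErrorOutput(err, fmt.Sprintf("Cannot list machines: %s", err), "json")

		return
	}

	SuccessOutput(response.Machines, "", "json")
}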
|
||||
|
||||
func JsonOutput(result interface{}, errResult error, outputFormat string) {
|
||||
var j []byte
|
||||
func SuccessOutput(result interface{}, override string, outputFormat string) {
|
||||
var jsonBytes []byte
|
||||
var err error
|
||||
switch outputFormat {
|
||||
case "json":
|
||||
if errResult != nil {
|
||||
j, err = json.MarshalIndent(ErrorOutput{errResult.Error()}, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
} else {
|
||||
j, err = json.MarshalIndent(result, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
jsonBytes, err = json.MarshalIndent(result, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
case "json-line":
|
||||
if errResult != nil {
|
||||
j, err = json.Marshal(ErrorOutput{errResult.Error()})
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
} else {
|
||||
j, err = json.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
jsonBytes, err = json.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
case "yaml":
|
||||
jsonBytes, err = yaml.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
default:
|
||||
//nolint
|
||||
fmt.Println(override)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
//nolint
|
||||
fmt.Println(string(jsonBytes))
|
||||
}
|
||||
|
||||
func ErrorOutput(errResult error, override string, outputFormat string) {
|
||||
type errOutput struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
SuccessOutput(errOutput{errResult.Error()}, override, outputFormat)
|
||||
}
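// Illustrative sketch (not part of the diff above) of what SuccessOutput emits
// per output format, using an arbitrary result value:
//
//	SuccessOutput(map[string]string{"version": "v0.13.0"}, "v0.13.0", "")
//	// default: prints the override string  ->  v0.13.0
//	SuccessOutput(map[string]string{"version": "v0.13.0"}, "v0.13.0", "json")
//	// prints indented JSON                 ->  { "version": "v0.13.0" }
//	SuccessOutput(map[string]string{"version": "v0.13.0"}, "v0.13.0", "yaml")
//	// prints YAML                          ->  version: v0.13.0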
|
||||
|
||||
func HasMachineOutputFlag() bool {
|
||||
for _, arg := range os.Args {
|
||||
if arg == "json" || arg == "json-line" || arg == "yaml" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
fmt.Println(string(j))
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type tokenAuth struct {
|
||||
token string
|
||||
}
|
||||
|
||||
// Return value is mapped to request headers.
|
||||
func (t tokenAuth) GetRequestMetadata(
|
||||
ctx context.Context,
|
||||
in ...string,
|
||||
) (map[string]string, error) {
|
||||
return map[string]string{
|
||||
"authorization": "Bearer " + t.token,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (tokenAuth) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// loadOIDCMatchMap is a wrapper around viper that verifies that the keys in
|
||||
// the match map are valid regex strings.
|
||||
func loadOIDCMatchMap() map[string]string {
|
||||
strMap := viper.GetStringMapString("oidc.domain_map")
|
||||
|
||||
for oidcMatcher := range strMap {
|
||||
_ = regexp.MustCompile(oidcMatcher)
|
||||
}
|
||||
|
||||
return strMap
|
||||
}
|
||||
|
||||
func GetFileMode(key string) fs.FileMode {
|
||||
modeStr := viper.GetString(key)
|
||||
|
||||
mode, err := strconv.ParseUint(modeStr, headscale.Base8, headscale.BitSize64)
|
||||
if err != nil {
|
||||
return PermissionFallback
|
||||
}
|
||||
|
||||
return fs.FileMode(mode)
|
||||
}
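// Illustrative sketch (not part of the diff above): GetFileMode parses the
// configured string as an octal mode and falls back to PermissionFallback
// (0o700) on invalid input, so the example config's
// `unix_socket_permission: "0770"` yields fs.FileMode(0o770).
//
//	viper.Set("unix_socket_permission", "0770")
//	mode := GetFileMode("unix_socket_permission") // 0o770
//	viper.Set("unix_socket_permission", "not-a-mode")
//	mode = GetFileMode("unix_socket_permission")  // 0o700 (PermissionFallback)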
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/spf13/cobra"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var version = "dev"
|
||||
var Version = "dev"
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
@@ -17,11 +15,7 @@ var versionCmd = &cobra.Command{
|
||||
Short: "Print the version.",
|
||||
Long: "The version of headscale.",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(map[string]string{"version": version}, nil, o)
|
||||
return
|
||||
}
|
||||
fmt.Println(version)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
SuccessOutput(map[string]string{"version": Version}, Version, output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/efekarakus/termcolor"
|
||||
@@ -9,6 +11,7 @@ import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/tcnksm/go-latest"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -20,6 +23,8 @@ func main() {
|
||||
colors = true
|
||||
case termcolor.LevelBasic:
|
||||
colors = true
|
||||
case termcolor.LevelNone:
|
||||
colors = false
|
||||
default:
|
||||
// no color, return text as is.
|
||||
colors = false
|
||||
@@ -38,25 +43,43 @@ func main() {
|
||||
NoColor: !colors,
|
||||
})
|
||||
|
||||
err := cli.LoadConfig("")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
if err := cli.LoadConfig(""); err != nil {
|
||||
log.Fatal().Caller().Err(err)
|
||||
}
|
||||
|
||||
machineOutput := cli.HasMachineOutputFlag()
|
||||
|
||||
logLevel := viper.GetString("log_level")
|
||||
switch logLevel {
|
||||
case "trace":
|
||||
zerolog.SetGlobalLevel(zerolog.TraceLevel)
|
||||
case "debug":
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
case "info":
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
case "warn":
|
||||
zerolog.SetGlobalLevel(zerolog.WarnLevel)
|
||||
case "error":
|
||||
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
|
||||
default:
|
||||
level, err := zerolog.ParseLevel(logLevel)
|
||||
if err != nil {
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
} else {
|
||||
zerolog.SetGlobalLevel(level)
|
||||
}
|
||||
|
||||
// If the user has requested a "machine" readable format,
|
||||
// then disable logging so the output remains valid.
|
||||
if machineOutput {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
if !viper.GetBool("disable_check_updates") && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
cli.Version != "dev" {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, cli.Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
fmt.Printf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
cli.Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cli.Execute()
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -25,10 +25,9 @@ func (s *Suite) SetUpSuite(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TearDownSuite(c *check.C) {
|
||||
|
||||
}
|
||||
|
||||
func (*Suite) TestPostgresConfigLoading(c *check.C) {
|
||||
func (*Suite) TestConfigLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
@@ -41,7 +40,10 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(filepath.Clean(path+"/../../config.json.postgres.example"), filepath.Join(tmpDir, "config.json"))
|
||||
err = os.Symlink(
|
||||
filepath.Clean(path+"/../../config-example.yaml"),
|
||||
filepath.Join(tmpDir, "config.yaml"),
|
||||
)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
@@ -53,46 +55,17 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "postgres")
|
||||
c.Assert(viper.GetString("db_port"), check.Equals, "5432")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
}
|
||||
|
||||
func (*Suite) TestSqliteConfigLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(filepath.Clean(path+"/../../config.json.sqlite.example"), filepath.Join(tmpDir, "config.json"))
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "db.sqlite")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
c.Assert(
|
||||
cli.GetFileMode("unix_socket_permission"),
|
||||
check.Equals,
|
||||
fs.FileMode(0o770),
|
||||
)
|
||||
}
|
||||
|
||||
func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
@@ -108,7 +81,10 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(filepath.Clean(path+"/../../config.json.sqlite.example"), filepath.Join(tmpDir, "config.json"))
|
||||
err = os.Symlink(
|
||||
filepath.Clean(path+"/../../config-example.yaml"),
|
||||
filepath.Join(tmpDir, "config.yaml"),
|
||||
)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
@@ -117,18 +93,18 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := cli.GetDNSConfig()
|
||||
fmt.Println(dnsConfig)
|
||||
dnsConfig, baseDomain := cli.GetDNSConfig()
|
||||
|
||||
c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
|
||||
|
||||
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
|
||||
c.Assert(dnsConfig.Proxied, check.Equals, true)
|
||||
c.Assert(baseDomain, check.Equals, "example.com")
|
||||
}
|
||||
|
||||
func writeConfig(c *check.C, tmpDir string, configYaml []byte) {
|
||||
// Populate a custom config file
|
||||
configFile := filepath.Join(tmpDir, "config.yaml")
|
||||
err := ioutil.WriteFile(configFile, configYaml, 0644)
|
||||
err := ioutil.WriteFile(configFile, configYaml, 0o600)
|
||||
if err != nil {
|
||||
c.Fatalf("Couldn't write file %s", configFile)
|
||||
}
|
||||
@@ -139,10 +115,11 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
//defer os.RemoveAll(tmpDir)
|
||||
fmt.Println(tmpDir)
|
||||
// defer os.RemoveAll(tmpDir)
|
||||
|
||||
configYaml := []byte("---\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: \"abc.pem\"")
|
||||
configYaml := []byte(
|
||||
"---\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: \"abc.pem\"",
|
||||
)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
|
||||
// Check configuration validation errors (1)
|
||||
@@ -150,13 +127,26 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
c.Assert(err, check.NotNil)
|
||||
// check.Matches can not handle multiline strings
|
||||
tmp := strings.ReplaceAll(err.Error(), "\n", "***")
|
||||
c.Assert(tmp, check.Matches, ".*Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both.*")
|
||||
c.Assert(tmp, check.Matches, ".*Fatal config error: the only supported values for tls_letsencrypt_challenge_type are.*")
|
||||
c.Assert(tmp, check.Matches, ".*Fatal config error: server_url must start with https:// or http://.*")
|
||||
fmt.Println(tmp)
|
||||
c.Assert(
|
||||
tmp,
|
||||
check.Matches,
|
||||
".*Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both.*",
|
||||
)
|
||||
c.Assert(
|
||||
tmp,
|
||||
check.Matches,
|
||||
".*Fatal config error: the only supported values for tls_letsencrypt_challenge_type are.*",
|
||||
)
|
||||
c.Assert(
|
||||
tmp,
|
||||
check.Matches,
|
||||
".*Fatal config error: server_url must start with https:// or http://.*",
|
||||
)
|
||||
|
||||
// Check configuration validation errors (2)
|
||||
configYaml = []byte("---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"")
|
||||
configYaml = []byte(
|
||||
"---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"",
|
||||
)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
186
config-example.yaml
Normal file
@@ -0,0 +1,186 @@
|
||||
---
|
||||
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
|
||||
#
|
||||
# - `/etc/headscale`
|
||||
# - `~/.headscale`
|
||||
# - current working directory
|
||||
|
||||
# The url clients will connect to.
|
||||
# Typically this will be a domain like:
|
||||
#
|
||||
# https://myheadscale.example.com:443
|
||||
#
|
||||
server_url: http://127.0.0.1:8080
|
||||
|
||||
# Address to listen to / bind to on the server
|
||||
#
|
||||
listen_addr: 0.0.0.0:8080
|
||||
|
||||
# Address to listen for gRPC.
|
||||
# gRPC is used for controlling a headscale server
|
||||
# remotely with the CLI
|
||||
# Note: Remote access _only_ works if you have
|
||||
# valid certificates.
|
||||
grpc_listen_addr: 0.0.0.0:50443
|
||||
|
||||
# Allow the gRPC admin interface to run in INSECURE
|
||||
# mode. This is not recommended as the traffic will
|
||||
# be unencrypted. Only enable if you know what you
|
||||
# are doing.
|
||||
grpc_allow_insecure: false
|
||||
|
||||
# Private key used to encrypt the traffic between headscale
|
||||
# and Tailscale clients.
|
||||
# The private key file which will be
|
||||
# autogenerated if it's missing
|
||||
private_key_path: /var/lib/headscale/private.key
|
||||
|
||||
# List of IP prefixes to allocate Tailscale addresses from.
|
||||
# Each prefix consists of either an IPv4 or IPv6 address,
|
||||
# and the associated prefix length, delimited by a slash.
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
|
||||
# DERP is a relay system that Tailscale uses when a direct
|
||||
# connection cannot be established.
|
||||
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
|
||||
#
|
||||
# headscale needs a list of DERP servers that can be presented
|
||||
# to the clients.
|
||||
derp:
|
||||
# List of externally available DERP maps encoded in JSON
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
|
||||
# Locally available DERP map files encoded in YAML
|
||||
#
|
||||
# This option is mostly interesting for people hosting
|
||||
# their own DERP servers:
|
||||
# https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
#
|
||||
# paths:
|
||||
# - /etc/headscale/derp-example.yaml
|
||||
paths: []
|
||||
|
||||
# If enabled, a worker will be set up to periodically
|
||||
# refresh the given sources and update the derpmap.
|
||||
auto_update_enabled: true
|
||||
|
||||
# How often should we check for DERP updates?
|
||||
update_frequency: 24h
|
||||
|
||||
# Disables the automatic check for headscale updates on startup
|
||||
disable_check_updates: false
|
||||
|
||||
# Time before an inactive ephemeral node is deleted.
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
|
||||
# SQLite config
|
||||
db_type: sqlite3
|
||||
db_path: /var/lib/headscale/db.sqlite
|
||||
|
||||
# # Postgres config
|
||||
# db_type: postgres
|
||||
# db_host: localhost
|
||||
# db_port: 5432
|
||||
# db_name: headscale
|
||||
# db_user: foo
|
||||
# db_pass: bar
|
||||
|
||||
### TLS configuration
|
||||
#
|
||||
## Let's encrypt / ACME
|
||||
#
|
||||
# headscale supports automatically requesting and setting up
|
||||
# TLS for a domain with Let's Encrypt.
|
||||
#
|
||||
# URL to ACME directory
|
||||
acme_url: https://acme-v02.api.letsencrypt.org/directory
|
||||
|
||||
# Email to register with ACME provider
|
||||
acme_email: ""
|
||||
|
||||
# Domain name to request a TLS certificate for:
|
||||
tls_letsencrypt_hostname: ""
|
||||
|
||||
# Path to store certificates and metadata needed by
|
||||
# letsencrypt
|
||||
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
|
||||
|
||||
# Type of ACME challenge to use, currently supported types:
|
||||
# HTTP-01 or TLS-ALPN-01
|
||||
# See [docs/tls.md](docs/tls.md) for more information
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
# When HTTP-01 challenge is chosen, letsencrypt must set up a
|
||||
# verification endpoint, and it will be listening on:
|
||||
# :http = port 80
|
||||
tls_letsencrypt_listen: ":http"
|
||||
|
||||
## Use already defined certificates:
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
log_level: info
|
||||
|
||||
# Path to a file containing ACL policies.
|
||||
# Recommended path: /etc/headscale/acl.hujson
|
||||
acl_policy_path: ""
|
||||
|
||||
## DNS
|
||||
#
|
||||
# headscale supports Tailscale's DNS configuration and MagicDNS.
|
||||
# Please have a look at their KB to better understand the concepts:
|
||||
#
|
||||
# - https://tailscale.com/kb/1054/dns/
|
||||
# - https://tailscale.com/kb/1081/magicdns/
|
||||
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
|
||||
#
|
||||
dns_config:
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
|
||||
# Split DNS (see https://tailscale.com/kb/1054/dns/),
|
||||
# list of search domains and the DNS to query for each one.
|
||||
#
|
||||
# restricted_nameservers:
|
||||
# foo.bar.com:
|
||||
# - 1.1.1.1
|
||||
# darp.headscale.net:
|
||||
# - 1.1.1.1
|
||||
# - 8.8.8.8
|
||||
|
||||
# Search domains to inject.
|
||||
domains: []
|
||||
|
||||
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
|
||||
# Only works if there is at least a nameserver defined.
|
||||
magic_dns: true
|
||||
|
||||
# Defines the base domain to create the hostnames for MagicDNS.
|
||||
# `base_domain` must be an FQDN, without the trailing dot.
|
||||
# The FQDN of the hosts will be
|
||||
# `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
|
||||
base_domain: example.com
|
||||
|
||||
# Unix socket used for the CLI to connect without authentication
|
||||
# Note: for local development, you probably want to change this to:
|
||||
# unix_socket: ./headscale.sock
|
||||
unix_socket: /var/run/headscale.sock
|
||||
unix_socket_permission: "0770"
|
||||
#
|
||||
# headscale supports experimental OpenID connect support,
|
||||
# it is still being tested and might have some bugs, please
|
||||
# help us test it.
|
||||
# OpenID Connect
|
||||
# oidc:
|
||||
# issuer: "https://your-oidc.issuer.com/path"
|
||||
# client_id: "your-oidc-client-id"
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
#
|
||||
# # Domain map is used to map incoming users (by their email) to
|
||||
# # a namespace. The key can be a string, or regex.
|
||||
# domain_map:
|
||||
# ".*": default-namespace
|
||||
@@ -1,25 +0,0 @@
|
||||
{
|
||||
"server_url": "http://127.0.0.1:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"private_key_path": "private.key",
|
||||
"derp_map_path": "derp.yaml",
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
"db_type": "postgres",
|
||||
"db_host": "localhost",
|
||||
"db_port": 5432,
|
||||
"db_name": "headscale",
|
||||
"db_user": "foo",
|
||||
"db_pass": "bar",
|
||||
"tls_letsencrypt_hostname": "",
|
||||
"tls_letsencrypt_listen": ":http",
|
||||
"tls_letsencrypt_cache_dir": ".cache",
|
||||
"tls_letsencrypt_challenge_type": "HTTP-01",
|
||||
"tls_cert_path": "",
|
||||
"tls_key_path": "",
|
||||
"acl_policy_path": "",
|
||||
"dns_config": {
|
||||
"nameservers": [
|
||||
"1.1.1.1"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
{
|
||||
"server_url": "http://127.0.0.1:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"private_key_path": "private.key",
|
||||
"derp_map_path": "derp.yaml",
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
"db_type": "sqlite3",
|
||||
"db_path": "db.sqlite",
|
||||
"tls_letsencrypt_hostname": "",
|
||||
"tls_letsencrypt_listen": ":http",
|
||||
"tls_letsencrypt_cache_dir": ".cache",
|
||||
"tls_letsencrypt_challenge_type": "HTTP-01",
|
||||
"tls_cert_path": "",
|
||||
"tls_key_path": "",
|
||||
"acl_policy_path": "",
|
||||
"dns_config": {
|
||||
"nameservers": [
|
||||
"1.1.1.1"
|
||||
]
|
||||
}
|
||||
}
|
||||
48
db.go
@@ -9,7 +9,10 @@ import (
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
const dbVersion = "1"
|
||||
const (
|
||||
dbVersion = "1"
|
||||
errValueNotFound = Error("not found")
|
||||
)
|
||||
|
||||
// KV is a key-value store in a psql table. For future use...
|
||||
type KV struct {
|
||||
@@ -24,21 +27,27 @@ func (h *Headscale) initDB() error {
|
||||
}
|
||||
h.db = db
|
||||
|
||||
if h.dbType == "postgres" {
|
||||
db.Exec("create extension if not exists \"uuid-ossp\";")
|
||||
if h.dbType == Postgres {
|
||||
db.Exec(`create extension if not exists "uuid-ossp";`)
|
||||
}
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "ip_address", "ip_addresses")
|
||||
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&KV{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Namespace{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&PreAuthKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -49,7 +58,13 @@ func (h *Headscale) initDB() error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&APIKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = h.setValue("db_version", dbVersion)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -65,12 +80,12 @@ func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
}
|
||||
|
||||
switch h.dbType {
|
||||
case "sqlite3":
|
||||
case Sqlite:
|
||||
db, err = gorm.Open(sqlite.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
})
|
||||
case "postgres":
|
||||
case Postgres:
|
||||
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
@@ -84,28 +99,33 @@ func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// getValue returns the value for the given key in KV
|
||||
// getValue returns the value for the given key in KV.
|
||||
func (h *Headscale) getValue(key string) (string, error) {
|
||||
var row KV
|
||||
if result := h.db.First(&row, "key = ?", key); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return "", errors.New("not found")
|
||||
if result := h.db.First(&row, "key = ?", key); errors.Is(
|
||||
result.Error,
|
||||
gorm.ErrRecordNotFound,
|
||||
) {
|
||||
return "", errValueNotFound
|
||||
}
|
||||
|
||||
return row.Value, nil
|
||||
}
|
||||
|
||||
// setValue sets value for the given key in KV
|
||||
// setValue sets value for the given key in KV.
|
||||
func (h *Headscale) setValue(key string, value string) error {
|
||||
kv := KV{
|
||||
keyValue := KV{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
|
||||
_, err := h.getValue(key)
|
||||
if err == nil {
|
||||
h.db.Model(&kv).Where("key = ?", key).Update("value", value)
|
||||
if _, err := h.getValue(key); err == nil {
|
||||
h.db.Model(&keyValue).Where("key = ?", key).Update("value", value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
h.db.Create(kv)
|
||||
h.db.Create(keyValue)
|
||||
|
||||
return nil
|
||||
}
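// Illustrative sketch (not part of the diff above): the KV helpers act as a
// tiny key/value store, e.g. for the schema version written in initDB.
//
//	_ = h.setValue("db_version", dbVersion)
//	version, err := h.getValue("db_version") // "1", nil
//	// err == errValueNotFound when the key has never been written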
|
||||
|
||||
15
derp-example.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
regions:
|
||||
900:
|
||||
regionid: 900
|
||||
regioncode: custom
|
||||
regionname: My Region
|
||||
nodes:
|
||||
- name: 900a
|
||||
regionid: 900
|
||||
hostname: myderp.mydomain.no
|
||||
ipv4: 123.123.123.123
|
||||
ipv6: "2604:a880:400:d1::828:b001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
164
derp.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v2"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {
|
||||
derpFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer derpFile.Close()
|
||||
var derpMap tailcfg.DERPMap
|
||||
b, err := io.ReadAll(derpFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(b, &derpMap)
|
||||
|
||||
return &derpMap, err
|
||||
}
|
||||
|
||||
func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), HTTPReadTimeout)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", addr.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: HTTPReadTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var derpMap tailcfg.DERPMap
|
||||
err = json.Unmarshal(body, &derpMap)
|
||||
|
||||
return &derpMap, err
|
||||
}
|
||||
|
||||
// mergeDERPMaps naively merges a list of DERPMaps into a single
|
||||
// DERPMap; it will _only_ look at the Regions map, keyed by an integer region ID.
|
||||
// If a region exists in two of the given DERPMaps, the region
|
||||
// from the _last_ DERPMap will be preserved.
|
||||
// An empty DERPMap list will result in a DERPMap with no regions.
|
||||
func mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap {
|
||||
result := tailcfg.DERPMap{
|
||||
OmitDefaultRegions: false,
|
||||
Regions: map[int]*tailcfg.DERPRegion{},
|
||||
}
|
||||
|
||||
for _, derpMap := range derpMaps {
|
||||
for id, region := range derpMap.Regions {
|
||||
result.Regions[id] = region
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
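// Illustrative sketch (not part of the new file above): when two DERPMaps both
// define region 900, the region from the later map in the slice wins. The
// example function name and region contents are hypothetical.
func exampleMergeDERPMaps() *tailcfg.DERPMap {
	first := &tailcfg.DERPMap{Regions: map[int]*tailcfg.DERPRegion{
		900: {RegionID: 900, RegionCode: "custom", RegionName: "Old"},
	}}
	second := &tailcfg.DERPMap{Regions: map[int]*tailcfg.DERPRegion{
		900: {RegionID: 900, RegionCode: "custom", RegionName: "New"},
		901: {RegionID: 901, RegionCode: "extra", RegionName: "Extra"},
	}}

	merged := mergeDERPMaps([]*tailcfg.DERPMap{first, second})
	// merged.Regions[900].RegionName == "New", and region 901 is kept as-is.

	return merged
}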
|
||||
|
||||
func GetDERPMap(cfg DERPConfig) *tailcfg.DERPMap {
|
||||
derpMaps := make([]*tailcfg.DERPMap, 0)
|
||||
|
||||
for _, path := range cfg.Paths {
|
||||
log.Debug().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("path", path).
|
||||
Msg("Loading DERPMap from path")
|
||||
derpMap, err := loadDERPMapFromPath(path)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("path", path).
|
||||
Err(err).
|
||||
Msg("Could not load DERP map from path")
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
derpMaps = append(derpMaps, derpMap)
|
||||
}
|
||||
|
||||
for _, addr := range cfg.URLs {
|
||||
derpMap, err := loadDERPMapFromURL(addr)
|
||||
log.Debug().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("url", addr.String()).
|
||||
Msg("Loading DERPMap from path")
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("url", addr.String()).
|
||||
Err(err).
|
||||
Msg("Could not load DERP map from path")
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
derpMaps = append(derpMaps, derpMap)
|
||||
}
|
||||
|
||||
derpMap := mergeDERPMaps(derpMaps)
|
||||
|
||||
log.Trace().Interface("derpMap", derpMap).Msg("DERPMap loaded")
|
||||
|
||||
if len(derpMap.Regions) == 0 {
|
||||
log.Warn().
|
||||
Msg("DERP map is empty, not a single DERP map datasource was loaded correctly or contained a region")
|
||||
}
|
||||
|
||||
return derpMap
|
||||
}
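// Illustrative sketch (not part of the new file above): building a DERPMap
// from one remote URL and one local YAML file, mirroring the derp section of
// config-example.yaml. The example function name and the local path are
// hypothetical.
func exampleGetDERPMap() *tailcfg.DERPMap {
	remote, _ := url.Parse("https://controlplane.tailscale.com/derpmap/default")

	cfg := DERPConfig{
		URLs:  []url.URL{*remote},
		Paths: []string{"/etc/headscale/derp-example.yaml"},
	}

	return GetDERPMap(cfg)
}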
|
||||
|
||||
func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
|
||||
log.Info().
|
||||
Dur("frequency", h.cfg.DERP.UpdateFrequency).
|
||||
Msg("Setting up a DERPMap update worker")
|
||||
ticker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-cancelChan:
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Failed to fetch namespaces")
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
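// Illustrative sketch (not part of the new file above): how the update worker
// is expected to be started and stopped when derp.auto_update_enabled is set.
// The example method name and the returned stop closure are hypothetical.
func (h *Headscale) exampleStartDERPUpdater() (stop func()) {
	cancelChan := make(chan struct{})
	go h.scheduledDERPMapUpdateWorker(cancelChan)

	return func() { close(cancelChan) }
}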
|
||||
146
derp.yaml
@@ -1,146 +0,0 @@
|
||||
# This file contains some of the official Tailscale DERP servers,
|
||||
# shamelessly taken from https://github.com/tailscale/tailscale/blob/main/net/dnsfallback/dns-fallback-servers.json
|
||||
#
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
regions:
|
||||
1:
|
||||
regionid: 1
|
||||
regioncode: nyc
|
||||
regionname: New York City
|
||||
nodes:
|
||||
- name: 1a
|
||||
regionid: 1
|
||||
hostname: derp1.tailscale.com
|
||||
ipv4: 159.89.225.99
|
||||
ipv6: "2604:a880:400:d1::828:b001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 1b
|
||||
regionid: 1
|
||||
hostname: derp1b.tailscale.com
|
||||
ipv4: 45.55.35.93
|
||||
ipv6: "2604:a880:800:a1::f:2001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
2:
|
||||
regionid: 2
|
||||
regioncode: sfo
|
||||
regionname: San Francisco
|
||||
nodes:
|
||||
- name: 2a
|
||||
regionid: 2
|
||||
hostname: derp2.tailscale.com
|
||||
ipv4: 167.172.206.31
|
||||
ipv6: "2604:a880:2:d1::c5:7001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 2b
|
||||
regionid: 2
|
||||
hostname: derp2b.tailscale.com
|
||||
ipv4: 64.227.106.23
|
||||
ipv6: "2604:a880:4:1d0::29:9000"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
3:
|
||||
regionid: 3
|
||||
regioncode: sin
|
||||
regionname: Singapore
|
||||
nodes:
|
||||
- name: 3a
|
||||
regionid: 3
|
||||
hostname: derp3.tailscale.com
|
||||
ipv4: 68.183.179.66
|
||||
ipv6: "2400:6180:0:d1::67d:8001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
4:
|
||||
regionid: 4
|
||||
regioncode: fra
|
||||
regionname: Frankfurt
|
||||
nodes:
|
||||
- name: 4a
|
||||
regionid: 4
|
||||
hostname: derp4.tailscale.com
|
||||
ipv4: 167.172.182.26
|
||||
ipv6: "2a03:b0c0:3:e0::36e:900"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 4b
|
||||
regionid: 4
|
||||
hostname: derp4b.tailscale.com
|
||||
ipv4: 157.230.25.0
|
||||
ipv6: "2a03:b0c0:3:e0::58f:3001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
5:
|
||||
regionid: 5
|
||||
regioncode: syd
|
||||
regionname: Sydney
|
||||
nodes:
|
||||
- name: 5a
|
||||
regionid: 5
|
||||
hostname: derp5.tailscale.com
|
||||
ipv4: 103.43.75.49
|
||||
ipv6: "2001:19f0:5801:10b7:5400:2ff:feaa:284c"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
6:
|
||||
regionid: 6
|
||||
regioncode: blr
|
||||
regionname: Bangalore
|
||||
nodes:
|
||||
- name: 6a
|
||||
regionid: 6
|
||||
hostname: derp6.tailscale.com
|
||||
ipv4: 68.183.90.120
|
||||
ipv6: "2400:6180:100:d0::982:d001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
7:
|
||||
regionid: 7
|
||||
regioncode: tok
|
||||
regionname: Tokyo
|
||||
nodes:
|
||||
- name: 7a
|
||||
regionid: 7
|
||||
hostname: derp7.tailscale.com
|
||||
ipv4: 167.179.89.145
|
||||
ipv6: "2401:c080:1000:467f:5400:2ff:feee:22aa"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
8:
|
||||
regionid: 8
|
||||
regioncode: lhr
|
||||
regionname: London
|
||||
nodes:
|
||||
- name: 8a
|
||||
regionid: 8
|
||||
hostname: derp8.tailscale.com
|
||||
ipv4: 167.71.139.179
|
||||
ipv6: "2a03:b0c0:1:e0::3cc:e001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
9:
|
||||
regionid: 9
|
||||
regioncode: sao
|
||||
regionname: São Paulo
|
||||
nodes:
|
||||
- name: 9a
|
||||
regionid: 9
|
||||
hostname: derp9.tailscale.com
|
||||
ipv4: 207.148.3.137
|
||||
ipv6: "2001:19f0:6401:1d9c:5400:2ff:feef:bb82"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
183
dns.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/set"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/dnsname"
|
||||
)
|
||||
|
||||
const (
|
||||
ByteSize = 8
|
||||
)
|
||||
|
||||
const (
|
||||
ipv4AddressLength = 32
|
||||
ipv6AddressLength = 128
|
||||
)
|
||||
|
||||
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
|
||||
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS
|
||||
// server (listening on 100.100.100.100 udp/53) should be used for.
|
||||
//
|
||||
// Tailscale.com includes in the list:
|
||||
// - the `BaseDomain` of the user
|
||||
// - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6)
|
||||
// - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`.
|
||||
// In the public SaaS this is [64-127].100.in-addr.arpa.
|
||||
//
|
||||
// The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this
|
||||
// is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the
|
||||
// subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet.
|
||||
//
|
||||
// How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). Tailscale.com seems to adhere to this,
|
||||
// and does not make use of RFC2317 ("Classless IN-ADDR.ARPA delegation") - hence generating the entries for the next
|
||||
// class block only.
|
||||
|
||||
// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).
|
||||
// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes))
|
||||
for _, ipPrefix := range ipPrefixes {
|
||||
var generateDNSRoot func(netaddr.IPPrefix) []dnsname.FQDN
|
||||
switch ipPrefix.IP().BitLen() {
|
||||
case ipv4AddressLength:
|
||||
generateDNSRoot = generateIPv4DNSRootDomain
|
||||
|
||||
case ipv6AddressLength:
|
||||
generateDNSRoot = generateIPv6DNSRootDomain
|
||||
|
||||
default:
|
||||
panic(
|
||||
fmt.Sprintf(
|
||||
"unsupported IP version with address length %d",
|
||||
ipPrefix.IP().BitLen(),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
fqdns = append(fqdns, generateDNSRoot(ipPrefix)...)
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
// Conversion to the std lib net.IPnet, a bit easier to operate
|
||||
netRange := ipPrefix.IPNet()
|
||||
maskBits, _ := netRange.Mask.Size()
|
||||
|
||||
// lastOctet is the last IP byte covered by the mask
|
||||
lastOctet := maskBits / ByteSize
|
||||
|
||||
// wildcardBits is the number of bits not under the mask in the lastOctet
|
||||
wildcardBits := ByteSize - maskBits%ByteSize
|
||||
|
||||
// min is the value in the lastOctet byte of the IP
|
||||
// max is min plus 2^wildcardBits - 1, i.e., the value when all the wildcardBits are set to 1
|
||||
min := uint(netRange.IP[lastOctet])
|
||||
max := (min + 1<<uint(wildcardBits)) - 1
|
||||
|
||||
// here we generate the base domain (e.g., 100.in-addr.arpa., 16.172.in-addr.arpa., etc.)
|
||||
rdnsSlice := []string{}
|
||||
for i := lastOctet - 1; i >= 0; i-- {
|
||||
rdnsSlice = append(rdnsSlice, fmt.Sprintf("%d", netRange.IP[i]))
|
||||
}
|
||||
rdnsSlice = append(rdnsSlice, "in-addr.arpa.")
|
||||
rdnsBase := strings.Join(rdnsSlice, ".")
|
||||
|
||||
fqdns := make([]dnsname.FQDN, 0, max-min+1)
|
||||
for i := min; i <= max; i++ {
|
||||
fqdn, err := dnsname.ToFQDN(fmt.Sprintf("%d.%s", i, rdnsBase))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fqdns = append(fqdns, fqdn)
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv6DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
const nibbleLen = 4
|
||||
|
||||
maskBits, _ := ipPrefix.IPNet().Mask.Size()
|
||||
expanded := ipPrefix.IP().StringExpanded()
|
||||
nibbleStr := strings.Map(func(r rune) rune {
|
||||
if r == ':' {
|
||||
return -1
|
||||
}
|
||||
|
||||
return r
|
||||
}, expanded)
|
||||
|
||||
// TODO?: this does not look like the most efficient implementation,
|
||||
// but the inputs are not so long as to cause problems,
|
||||
// and from what I can see, the generateMagicDNSRootDomains
|
||||
// function is called only once over the lifetime of a server process.
|
||||
prefixConstantParts := []string{}
|
||||
for i := 0; i < maskBits/nibbleLen; i++ {
|
||||
prefixConstantParts = append(
|
||||
[]string{string(nibbleStr[i])},
|
||||
prefixConstantParts...)
|
||||
}
|
||||
|
||||
makeDomain := func(variablePrefix ...string) (dnsname.FQDN, error) {
|
||||
prefix := strings.Join(append(variablePrefix, prefixConstantParts...), ".")
|
||||
|
||||
return dnsname.ToFQDN(fmt.Sprintf("%s.ip6.arpa", prefix))
|
||||
}
|
||||
|
||||
var fqdns []dnsname.FQDN
|
||||
if maskBits%4 == 0 {
|
||||
dom, _ := makeDomain()
|
||||
fqdns = append(fqdns, dom)
|
||||
} else {
|
||||
domCount := 1 << (maskBits % nibbleLen)
|
||||
fqdns = make([]dnsname.FQDN, 0, domCount)
|
||||
for i := 0; i < domCount; i++ {
|
||||
varNibble := fmt.Sprintf("%x", i)
|
||||
dom, err := makeDomain(varNibble)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fqdns = append(fqdns, dom)
|
||||
}
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func getMapResponseDNSConfig(
|
||||
dnsConfigOrig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
machine Machine,
|
||||
peers Machines,
|
||||
) *tailcfg.DNSConfig {
|
||||
var dnsConfig *tailcfg.DNSConfig
|
||||
if dnsConfigOrig != nil && dnsConfigOrig.Proxied { // if MagicDNS is enabled
|
||||
// Only inject the Search Domain of the current namespace - shared nodes should use their full FQDN
|
||||
dnsConfig = dnsConfigOrig.Clone()
|
||||
dnsConfig.Domains = append(
|
||||
dnsConfig.Domains,
|
||||
fmt.Sprintf("%s.%s", machine.Namespace.Name, baseDomain),
|
||||
)
|
||||
|
||||
namespaceSet := set.New(set.ThreadSafe)
|
||||
namespaceSet.Add(machine.Namespace)
|
||||
for _, p := range peers {
|
||||
namespaceSet.Add(p.Namespace)
|
||||
}
|
||||
for _, namespace := range namespaceSet.List() {
|
||||
dnsRoute := fmt.Sprintf("%s.%s", namespace.(Namespace).Name, baseDomain)
|
||||
dnsConfig.Routes[dnsRoute] = nil
|
||||
}
|
||||
} else {
|
||||
dnsConfig = dnsConfigOrig
|
||||
}
|
||||
|
||||
return dnsConfig
|
||||
}
|
||||
399
dns_test.go
Normal file
@@ -0,0 +1,399 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("100.64.0.0/10"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
if domain == "64.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "100.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "127.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("172.16.0.0/16"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
if domain == "0.16.172.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "255.16.172.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
// Happens when netmask is a multiple of 4 bits (sounds likely).
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
c.Assert(len(domains), check.Equals, 1)
|
||||
c.Assert(
|
||||
domains[0].WithTrailingDot(),
|
||||
check.Equals,
|
||||
"0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.",
|
||||
)
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6SingleMultiple(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/50"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
yieldsRoot := func(dom string) bool {
|
||||
for _, candidate := range domains {
|
||||
if candidate.WithTrailingDot() == dom {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
c.Assert(len(domains), check.Equals, 4)
|
||||
c.Assert(yieldsRoot("0.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("1.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("2.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("3.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
namespaceShared1, err := app.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared2, err := app.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared3, err := app.CreateNamespace("shared3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared2, err := app.CreatePreAuthKey(
|
||||
namespaceShared2.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared3, err := app.CreatePreAuthKey(
|
||||
namespaceShared3.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
PreAuthKey2InShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
err = app.AddSharedMachineToNamespace(machineInShared2, namespaceShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: true,
|
||||
}
|
||||
|
||||
peersOfMachineInShared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachineInShared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 2)
|
||||
|
||||
domainRouteShared1 := fmt.Sprintf("%s.%s", namespaceShared1.Name, baseDomain)
|
||||
_, ok := dnsConfig.Routes[domainRouteShared1]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
|
||||
domainRouteShared2 := fmt.Sprintf("%s.%s", namespaceShared2.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared2]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
|
||||
domainRouteShared3 := fmt.Sprintf("%s.%s", namespaceShared3.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared3]
|
||||
c.Assert(ok, check.Equals, false)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
namespaceShared1, err := app.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared2, err := app.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared3, err := app.CreateNamespace("shared3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared2, err := app.CreatePreAuthKey(
|
||||
namespaceShared2.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared3, err := app.CreatePreAuthKey(
|
||||
namespaceShared3.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKey2InShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
err = app.AddSharedMachineToNamespace(machineInShared2, namespaceShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: false,
|
||||
}
|
||||
|
||||
peersOfMachine1Shared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachine1Shared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 0)
|
||||
c.Assert(len(dnsConfig.Domains), check.Equals, 1)
|
||||
}
|
||||
44
docs/README.md
Normal file
@@ -0,0 +1,44 @@
|
||||
# headscale documentation
|
||||
|
||||
This page contains the official and community-contributed documentation for `headscale`.
|
||||
|
||||
If you are having trouble following the documentation or get unexpected results,
|
||||
please ask on [Discord](https://discord.gg/XcQxk2VHjx) instead of opening an Issue.
|
||||
|
||||
## Official documentation
|
||||
|
||||
### How-to
|
||||
|
||||
- [Running headscale on Linux](running-headscale-linux.md)
|
||||
- [Control headscale remotely](remote-cli.md)
|
||||
- [Using a Windows client with headscale](windows-client.md)
|
||||
|
||||
### References
|
||||
|
||||
- [Configuration](../config-example.yaml)
|
||||
- [Glossary](glossary.md)
|
||||
- [TLS](tls.md)
|
||||
|
||||
## Community documentation
|
||||
|
||||
Community documentation is not actively maintained by the headscale authors and is
|
||||
written by community members. It is _not_ verified by `headscale` developers.
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
- [Running headscale in a container](running-headscale-container.md)
|
||||
|
||||
## Misc
|
||||
|
||||
### Policy ACLs
|
||||
|
||||
Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.
|
||||
|
||||
For instance, instead of referring to users when defining groups you must
|
||||
use namespaces (which are the equivalent of users/logins in Tailscale.com).
|
||||
|
||||
Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.
|
||||
|
||||
### Apple devices
|
||||
|
||||
An endpoint with information on how to connect your Apple devices (currently macOS only) is available at `/apple` on your running instance.
|
||||
5
docs/examples/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Examples
|
||||
|
||||
This directory contains examples of how to run `headscale` on different platforms.
|
||||
|
||||
All examples are provided by the community and they are not verified by the `headscale` authors.
|
||||
@@ -1,7 +1,9 @@
|
||||
# Deploying Headscale on Kubernetes
|
||||
# Deploying headscale on Kubernetes
|
||||
|
||||
**Note:** This is contributed by the community and not verified by the headscale authors.
|
||||
|
||||
This directory contains [Kustomize](https://kustomize.io) templates that deploy
|
||||
Headscale in various configurations.
|
||||
headscale in various configurations.
|
||||
|
||||
These templates currently support Rancher k3s. Other clusters may require
|
||||
adaptation, especially around volume claims and ingress.
|
||||
@@ -24,6 +26,7 @@ Configure DERP servers by editing `base/site/derp.yaml` if needed.
|
||||
You'll somehow need to get `headscale:latest` into your cluster image registry.
|
||||
|
||||
An easy way to do this with k3s:
|
||||
|
||||
- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`)
|
||||
- `docker build -t headscale:latest ..` from here
|
||||
|
||||
@@ -61,21 +64,21 @@ Use the wrapper script to remotely operate headscale to perform administrative
|
||||
tasks like creating namespaces, authkeys, etc.
|
||||
|
||||
```
|
||||
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash
|
||||
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash
|
||||
|
||||
headscale is an open source implementation of the Tailscale control server
|
||||
|
||||
https://gitlab.com/juanfont/headscale
|
||||
https://github.com/juanfont/headscale
|
||||
|
||||
Usage:
|
||||
headscale [command]
|
||||
|
||||
Available Commands:
|
||||
help Help about any command
|
||||
namespace Manage the namespaces of Headscale
|
||||
node Manage the nodes of Headscale
|
||||
preauthkey Handle the preauthkeys in Headscale
|
||||
routes Manage the routes of Headscale
|
||||
namespace Manage the namespaces of headscale
|
||||
node Manage the nodes of headscale
|
||||
preauthkey Handle the preauthkeys in headscale
|
||||
routes Manage the routes of headscale
|
||||
serve Launches the headscale server
|
||||
version Print the version.
|
||||
|
||||
18
docs/examples/kustomize/base/ingress.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: headscale
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
spec:
|
||||
rules:
|
||||
- host: $(PUBLIC_HOSTNAME)
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: headscale
|
||||
port:
|
||||
number: 8080
|
||||
path: /
|
||||
pathType: Prefix
|
||||
42
docs/examples/kustomize/base/kustomization.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
namespace: headscale
|
||||
resources:
|
||||
- configmap.yaml
|
||||
- ingress.yaml
|
||||
- service.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
configMapGenerator:
|
||||
- name: headscale-site
|
||||
files:
|
||||
- derp.yaml=site/derp.yaml
|
||||
envs:
|
||||
- site/public.env
|
||||
- name: headscale-etc
|
||||
literals:
|
||||
- config.json={}
|
||||
secretGenerator:
|
||||
- name: headscale
|
||||
files:
|
||||
- secrets/private-key
|
||||
vars:
|
||||
- name: PUBLIC_PROTO
|
||||
objRef:
|
||||
kind: ConfigMap
|
||||
name: headscale-site
|
||||
apiVersion: v1
|
||||
fieldRef:
|
||||
fieldPath: data.public-proto
|
||||
- name: PUBLIC_HOSTNAME
|
||||
objRef:
|
||||
kind: ConfigMap
|
||||
name: headscale-site
|
||||
apiVersion: v1
|
||||
fieldRef:
|
||||
fieldPath: data.public-hostname
|
||||
- name: CONTACT_EMAIL
|
||||
objRef:
|
||||
kind: ConfigMap
|
||||
name: headscale-site
|
||||
apiVersion: v1
|
||||
fieldRef:
|
||||
fieldPath: data.contact-email
|
||||
@@ -8,6 +8,6 @@ spec:
|
||||
selector:
|
||||
app: headscale
|
||||
ports:
|
||||
- name: http
|
||||
targetPort: http
|
||||
port: 8080
|
||||
- name: http
|
||||
targetPort: http
|
||||
port: 8080
|
||||
76
docs/examples/kustomize/postgres/deployment.yaml
Normal file
@@ -0,0 +1,76 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: headscale
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: headscale
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: headscale
|
||||
spec:
|
||||
containers:
|
||||
- name: headscale
|
||||
image: "headscale:latest"
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/go/bin/headscale", "serve"]
|
||||
env:
|
||||
- name: SERVER_URL
|
||||
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
|
||||
- name: LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: ephemeral_node_inactivity_timeout
|
||||
- name: DB_TYPE
|
||||
value: postgres
|
||||
- name: DB_HOST
|
||||
value: postgres.headscale.svc.cluster.local
|
||||
- name: DB_PORT
|
||||
value: "5432"
|
||||
- name: DB_USER
|
||||
value: headscale
|
||||
- name: DB_PASS
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgresql
|
||||
key: password
|
||||
- name: DB_NAME
|
||||
value: headscale
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
containerPort: 8080
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /vol/config
|
||||
- name: secret
|
||||
mountPath: /vol/secret
|
||||
- name: etc
|
||||
mountPath: /etc/headscale
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: headscale-site
|
||||
- name: etc
|
||||
configMap:
|
||||
name: headscale-etc
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: headscale
|
||||
13
docs/examples/kustomize/postgres/kustomization.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- postgres-service.yaml
|
||||
- postgres-statefulset.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
secretGenerator:
|
||||
- name: postgresql
|
||||
files:
|
||||
- secrets/password
|
||||
@@ -8,6 +8,6 @@ spec:
|
||||
selector:
|
||||
app: postgres
|
||||
ports:
|
||||
- name: postgres
|
||||
targetPort: postgres
|
||||
port: 5432
|
||||
- name: postgres
|
||||
targetPort: postgres
|
||||
port: 5432
|
||||
49
docs/examples/kustomize/postgres/postgres-statefulset.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
serviceName: postgres
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
containers:
|
||||
- name: postgres
|
||||
image: "postgres:13"
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgresql
|
||||
key: password
|
||||
- name: POSTGRES_USER
|
||||
value: headscale
|
||||
ports:
|
||||
- name: postgres
|
||||
protocol: TCP
|
||||
containerPort: 5432
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 5432
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: pgdata
|
||||
mountPath: /var/lib/postgresql/data
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: pgdata
|
||||
spec:
|
||||
storageClassName: local-path
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -6,6 +6,6 @@ metadata:
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: production-cert
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: production-cert
|
||||
@@ -0,0 +1,9 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- production-issuer.yaml
|
||||
patches:
|
||||
- path: ingress-patch.yaml
|
||||
target:
|
||||
kind: Ingress
|
||||
@@ -11,6 +11,6 @@ spec:
|
||||
# Secret resource used to store the account's private key.
|
||||
name: letsencrypt-production-acc-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
@@ -1,5 +1,5 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
- ../base
|
||||
resources:
|
||||
- statefulset.yaml
|
||||
- statefulset.yaml
|
||||
77
docs/examples/kustomize/sqlite/statefulset.yaml
Normal file
@@ -0,0 +1,77 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: headscale
|
||||
spec:
|
||||
serviceName: headscale
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: headscale
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: headscale
|
||||
spec:
|
||||
containers:
|
||||
- name: headscale
|
||||
image: "headscale:latest"
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/go/bin/headscale", "serve"]
|
||||
env:
|
||||
- name: SERVER_URL
|
||||
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
|
||||
- name: LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: ephemeral_node_inactivity_timeout
|
||||
- name: DB_TYPE
|
||||
value: sqlite3
|
||||
- name: DB_PATH
|
||||
value: /vol/data/db.sqlite
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
containerPort: 8080
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /vol/config
|
||||
- name: data
|
||||
mountPath: /vol/data
|
||||
- name: secret
|
||||
mountPath: /vol/secret
|
||||
- name: etc
|
||||
mountPath: /etc/headscale
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: headscale-site
|
||||
- name: etc
|
||||
configMap:
|
||||
name: headscale-etc
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: headscale
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
storageClassName: local-path
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -6,6 +6,6 @@ metadata:
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: staging-cert
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: staging-cert
|
||||
9
docs/examples/kustomize/staging-tls/kustomization.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- staging-issuer.yaml
|
||||
patches:
|
||||
- path: ingress-patch.yaml
|
||||
target:
|
||||
kind: Ingress
|
||||
@@ -11,6 +11,6 @@ spec:
|
||||
# Secret resource used to store the account's private key.
|
||||
name: letsencrypt-staging-acc-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
3
docs/glossary.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Glossary
|
||||
|
||||
- Namespace: Collection of Tailscale nodes that can see each other. In Tailscale.com this is called Tailnet.
|
||||
BIN
docs/images/windows-registry.png
Normal file
Binary file not shown.
|
100
docs/remote-cli.md
Normal file
@@ -0,0 +1,100 @@
|
||||
# Controlling `headscale` with remote CLI
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- A workstation to run `headscale` (could be Linux, macOS, other supported platforms)
|
||||
- A `headscale` server (version `0.13.0` or newer)
|
||||
- Access to create API keys (local access to the `headscale` server)
|
||||
- `headscale` _must_ be served over TLS/HTTPS
|
||||
- Remote access does _not_ support unencrypted traffic.
|
||||
- Port `50443` must be open in the firewall (or the port overridden by the `grpc_listen_addr` option)
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to control a `headscale` instance
|
||||
from a remote machine with the `headscale` command line binary.
|
||||
|
||||
## Create an API key
|
||||
|
||||
We need to create an API key to authenticate with the remote `headscale` server when using it from our workstation.
|
||||
|
||||
To create an API key, log into your `headscale` server and generate a key:
|
||||
|
||||
```shell
|
||||
headscale apikeys create --expiration 90d
|
||||
```
|
||||
|
||||
Copy the output of the command and save it for later. Please note that you cannot retrieve a key again;
|
||||
if the key is lost, expire the old one, and create a new key.
|
||||
|
||||
To list the keys currently associated with the server:
|
||||
|
||||
```shell
|
||||
headscale apikeys list
|
||||
```
|
||||
|
||||
and to expire a key:
|
||||
|
||||
```shell
|
||||
headscale apikeys expire --prefix "<PREFIX>"
|
||||
```
|
||||
|
||||
## Download and configure `headscale`
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
2. Put the binary somewhere in your `PATH`, e.g. `/usr/local/bin/headscale`
|
||||
|
||||
3. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
4. Configure the CLI through Environment Variables
|
||||
|
||||
```shell
|
||||
export HEADSCALE_CLI_ADDRESS="<HEADSCALE ADDRESS>:<PORT>"
|
||||
export HEADSCALE_CLI_API_KEY="<API KEY FROM PREVIOUS STAGE>"
|
||||
```
|
||||
|
||||
for example:
|
||||
|
||||
```shell
|
||||
export HEADSCALE_CLI_ADDRESS="headscale.example.com:50443"
|
||||
export HEADSCALE_CLI_API_KEY="abcde12345"
|
||||
```
|
||||
|
||||
This will tell the `headscale` binary to connect to a remote instance, instead of looking
|
||||
for a local instance (which is what it does on the server).
|
||||
|
||||
The API key is needed to make sure that you are allowed to access the server. The key is _not_
|
||||
needed when running directly on the server, as the connection is local.
|
||||
|
||||
5. Test the connection
|
||||
|
||||
Let us run the headscale command to verify that we can connect by listing our nodes:
|
||||
|
||||
```shell
|
||||
headscale nodes list
|
||||
```
|
||||
|
||||
You should now be able to see a list of your nodes from your workstation, and you can
|
||||
now control the `headscale` server from your workstation.
|
||||
|
||||
## Behind a proxy
|
||||
|
||||
It is possible to run the gRPC remote endpoint behind a reverse proxy, like Nginx, and have it run on the _same_ port as `headscale`.
|
||||
|
||||
While this is _not a supported_ feature, an example of how this can be set up on
|
||||
[NixOS is shown here](https://github.com/kradalby/dotfiles/blob/4489cdbb19cddfbfae82cd70448a38fde5a76711/machines/headscale.oracldn/headscale.nix#L61-L91).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Checklist:
|
||||
|
||||
- Make sure you have the _same_ `headscale` version on your server and workstation (see the example below)
|
||||
- Make sure you use version `0.13.0` or newer.
|
||||
- Verify that your TLS certificate is valid and trusted
|
||||
- If you do not have access to a trusted certificate (e.g. from Let's Encrypt), add your self-signed certificate to the trust store of your OS, or
|
||||
- Set `HEADSCALE_CLI_INSECURE` to 0 in your environment
|
||||
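For example, the version can be checked on both machines with the `version` subcommand; run it on the server and on the workstation and compare the output:

```shell
headscale version
```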
148
docs/running-headscale-container.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# Running headscale in a container
|
||||
|
||||
**Note:** the container documentation is maintained by the _community_ and there is no guarantee
|
||||
it is up to date, or working.
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to set up and run `headscale` in a container.
|
||||
[Docker](https://www.docker.com) is used as the reference container implementation, but there is no reason that it should
|
||||
not work with alternatives like [Podman](https://podman.io). The Docker image can be found on Docker Hub [here](https://hub.docker.com/r/headscale/headscale).
|
||||
|
||||
## Configure and run `headscale`
|
||||
|
||||
1. Prepare a directory on the host Docker node in your directory of choice, used to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir ./headscale && cd ./headscale
|
||||
mkdir ./config
|
||||
```
|
||||
|
||||
2. Create an empty SQLite database in the headscale directory:
|
||||
|
||||
```shell
|
||||
touch ./config/db.sqlite
|
||||
```
|
||||
|
||||
3. **(Strongly Recommended)** Download a copy of the [example configuration](../config-example.yaml) from the [headscale repository](https://github.com/juanfont/headscale/).
|
||||
|
||||
Using wget:
|
||||
|
||||
```shell
|
||||
wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
|
||||
```
|
||||
|
||||
Using curl:
|
||||
|
||||
```shell
|
||||
curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml
|
||||
```
|
||||
|
||||
**(Advanced)** If you would like to hand craft a config file **instead** of downloading the example config file, create a blank `headscale` configuration in the headscale directory to edit:
|
||||
|
||||
```shell
|
||||
touch ./config/config.yaml
|
||||
```
|
||||
|
||||
Modify the config file to your preferences before launching the Docker container.
|
||||
|
||||
4. Start the headscale server while working in the host headscale directory:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--rm \
|
||||
--volume $(pwd)/config:/etc/headscale/ \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
headscale/headscale:<VERSION> \
|
||||
headscale serve
|
||||
|
||||
```
|
||||
|
||||
This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
|
||||
`headscale` instance becomes available and then detach so headscale runs in the background.
|
||||
|
||||
5. Verify `headscale` is running:
|
||||
|
||||
Follow the container logs:
|
||||
|
||||
```shell
|
||||
docker logs --follow headscale
|
||||
```
|
||||
|
||||
Verify running containers:
|
||||
|
||||
```shell
|
||||
docker ps
|
||||
```
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:8080/metrics
|
||||
```
|
||||
|
||||
6. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
|
||||
|
||||
```shell
|
||||
docker exec headscale -- headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
To register a machine when running `headscale` in a container, take the headscale command and pass it to the container:
|
||||
|
||||
```shell
|
||||
docker exec headscale -- \
|
||||
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register machine using a pre authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
|
||||
```shell
|
||||
docker exec headscale -- \
|
||||
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
## Debugging headscale running in Docker
|
||||
|
||||
The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug your application running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.
|
||||
|
||||
### Running the debug Docker container
|
||||
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
|
||||
### Executing commands in the debug container
|
||||
|
||||
The default command in the debug container is to run `headscale`, which is located at `/bin/headscale` inside the container.
|
||||
|
||||
Additionally, the debug container includes a minimalist Busybox shell.
|
||||
|
||||
To launch a shell in the container, use:
|
||||
|
||||
```
|
||||
docker run -it headscale/headscale:x.x.x-debug sh
|
||||
```
|
||||
|
||||
You can also execute commands directly, such as `ls /bin` in this example:
|
||||
|
||||
```
|
||||
docker run headscale/headscale:x.x.x-debug ls /bin
|
||||
```
|
||||
|
||||
Using `docker exec` allows you to run commands in an existing container.
|
||||
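For example, assuming the container from the steps above is running under the name `headscale` and uses the `-debug` image, an interactive shell can be opened in it:

```shell
# Open an interactive Busybox shell in the running debug container
docker exec -it headscale sh
```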
184
docs/running-headscale-linux.md
Normal file
@@ -0,0 +1,184 @@
|
||||
# Running headscale on Linux
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to set up and run `headscale` on Linux.
|
||||
In addition to the "get up and running" section, there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd)
|
||||
describing how to make `headscale` run properly in a server environment.
|
||||
|
||||
## Configure and run `headscale`
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
```shell
|
||||
wget --output-document=/usr/local/bin/headscale \
|
||||
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
|
||||
```
|
||||
|
||||
2. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
3. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
# Directory for configuration
|
||||
|
||||
mkdir -p /etc/headscale
|
||||
|
||||
# Directory for Database, and other variable data (like certificates)
|
||||
mkdir -p /var/lib/headscale
|
||||
```
|
||||
|
||||
4. Create an empty SQLite database:
|
||||
|
||||
```shell
|
||||
touch /var/lib/headscale/db.sqlite
|
||||
```
|
||||
|
||||
5. Create a `headscale` configuration:
|
||||
|
||||
```shell
|
||||
touch /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
|
||||
from the [headscale repository](../)
|
||||
|
||||
6. Start the headscale server:
|
||||
|
||||
```shell
|
||||
headscale serve
|
||||
```
|
||||
|
||||
This command will start `headscale` in the current terminal session.
|
||||
|
||||
---
|
||||
|
||||
To continue the tutorial, open a new terminal and let it run in the background.
|
||||
Alternatively, use a terminal multiplexer like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/).
|
||||
|
||||
To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing.
|
||||
|
||||
7. Verify `headscale` is running:
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:8080/metrics
|
||||
```
|
||||
|
||||
8. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
|
||||
|
||||
```shell
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
Register the machine:
|
||||
|
||||
```shell
|
||||
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register machine using a pre authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
|
||||
```shell
|
||||
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
## Running `headscale` in the background with SystemD
|
||||
|
||||
This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/).
|
||||
This should work on most modern Linux distributions.
|
||||
|
||||
1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing:
|
||||
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=headscale controller
|
||||
After=syslog.target
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=headscale
|
||||
Group=headscale
|
||||
ExecStart=/usr/local/bin/headscale serve
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
# Optional security enhancements
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
ReadWritePaths=/var/lib/headscale /var/run/headscale
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
RuntimeDirectory=headscale
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
Note that when running as the headscale user (see the end of this document for a sketch of creating the `headscale` system user), ensure that you either add your current user to the headscale group:
|
||||
|
||||
```shell
|
||||
usermod -a -G headscale current_user
|
||||
```
|
||||
|
||||
or run all headscale commands as the headscale user:
|
||||
|
||||
```shell
|
||||
su - headscale
|
||||
```
|
||||
|
||||
2. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a SystemD friendly path:
|
||||
|
||||
```yaml
|
||||
unix_socket: /var/run/headscale/headscale.sock
|
||||
```
|
||||
|
||||
3. Reload SystemD to load the new configuration file:
|
||||
|
||||
```shell
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
4. Enable and start the new `headscale` service:
|
||||
|
||||
```shell
|
||||
systemctl enable headscale
|
||||
systemctl start headscale
|
||||
```
|
||||
|
||||
5. Verify the headscale service:
|
||||
|
||||
```shell
|
||||
systemctl status headscale
|
||||
```
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:8080/metrics
|
||||
```
|
||||
|
||||
`headscale` will now run in the background and start at boot.
|
||||
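A note on the `headscale` user referenced in the SystemD unit above: the service assumes that a dedicated system account exists. If it does not, it can be created with something like the following sketch (the exact flags are an assumption and may vary between distributions):

```shell
# Create a dedicated system user named "headscale",
# without a login shell and without a home directory.
useradd --system --no-create-home --shell /usr/sbin/nologin headscale
```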
31
docs/tls.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# Running the service via TLS (optional)
|
||||
|
||||
## Let's Encrypt / ACME
|
||||
|
||||
To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.
|
||||
|
||||
```yaml
|
||||
tls_letsencrypt_hostname: ""
|
||||
tls_letsencrypt_listen: ":http"
|
||||
tls_letsencrypt_cache_dir: ".cache"
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
```
|
||||
|
||||
### Challenge type HTTP-01
|
||||
|
||||
The default challenge type `HTTP-01` requires that headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.
|
||||
|
||||
If you need to change the ip and/or port used by headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.
|
||||
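As a rough sketch (the port numbers here are assumptions, not part of the official configuration): if `tls_letsencrypt_listen` were set to `":9000"`, a firewall rule such as the following could redirect the incoming validation traffic from port 80 to headscale:

```shell
# Assumption: tls_letsencrypt_listen is ":9000"; redirect inbound TCP port 80 to it
iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 9000
```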
|
||||
### Challenge type TLS-ALPN-01
|
||||
|
||||
Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.
|
||||
|
||||
## Bring your own certificate
|
||||
|
||||
headscale can also be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```yaml
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
```
|
||||
50
docs/windows-client.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Connecting a Windows client
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing how a user can use the official Windows [Tailscale](https://tailscale.com) client with `headscale`.
|
||||
|
||||
## Add registry keys
|
||||
|
||||
To make the Windows client behave as expected and to run well with `headscale`, two registry keys **must** be set:
|
||||
|
||||
- `HKLM:\SOFTWARE\Tailscale IPN\UnattendedMode` must be set to `always` as a `string` type, to allow Tailscale to run properly in the background
|
||||
- `HKLM:\SOFTWARE\Tailscale IPN\LoginURL` must be set to `<YOUR HEADSCALE URL>` as a `string` type, to ensure Tailscale contacts the correct control server.
|
||||
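As a sketch only (these exact commands are not part of the official guide), the keys can be created from an elevated Command Prompt with `reg add`, substituting your own server URL:

```
:: Assumption: run from an elevated Command Prompt; replace <YOUR HEADSCALE URL> with your server URL
reg add "HKLM\SOFTWARE\Tailscale IPN" /v UnattendedMode /t REG_SZ /d always /f
reg add "HKLM\SOFTWARE\Tailscale IPN" /v LoginURL /t REG_SZ /d "<YOUR HEADSCALE URL>" /f
```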
|
||||

|
||||
|
||||
The Tailscale Windows client has been observed to reset its configuration on logout/reboot, and these two keys [resolve that issue](https://github.com/tailscale/tailscale/issues/2798).
|
||||
|
||||
For a guide on how to edit registry keys, [check out Computer Hope](https://www.computerhope.com/issues/ch001348.htm).
|
||||
|
||||
## Installation
|
||||
|
||||
Download the [Official Windows Client](https://tailscale.com/download/windows) and install it.
|
||||
|
||||
When the installation has finished, start Tailscale and log in (you might have to click the icon in the system tray).
|
||||
|
||||
Logging in should open a browser window and direct you to your `headscale` instance.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you are seeing repeated messages like:
|
||||
|
||||
```
|
||||
[GIN] 2022/02/10 - 16:39:34 | 200 | 1.105306ms | 127.0.0.1 | POST "/machine/redacted"
|
||||
```
|
||||
|
||||
in your `headscale` output, turn on `DEBUG` logging and look for:
|
||||
|
||||
```
|
||||
2022-02-11T00:59:29Z DBG Machine registration has expired. Sending a authurl to register machine=redacted
|
||||
```
|
||||
|
||||
This typically means that the registry keys above were not set appropriately.
|
||||
|
||||
To reset and try again, it is important to do the following:
|
||||
|
||||
1. Ensure the registry keys from the previous guide are correctly set.
|
||||
2. Shut down the Tailscale service (or the client running in the tray)
|
||||
3. Delete the Tailscale application data folder, located at `C:\Users\<USERNAME>\AppData\Local\Tailscale`, and try to connect again.
|
||||
4. Ensure the Windows node is deleted from headscale (to ensure a fresh setup)
|
||||
5. Start Tailscale on the Windows machine and retry the login.
|
||||
559
gen/go/headscale/v1/apikey.pb.go
Normal file
@@ -0,0 +1,559 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type ApiKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ApiKey) Reset() {
|
||||
*x = ApiKey{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ApiKey) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ApiKey) ProtoMessage() {}
|
||||
|
||||
func (x *ApiKey) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ApiKey.ProtoReflect.Descriptor instead.
|
||||
func (*ApiKey) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *ApiKey) GetId() uint64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *ApiKey) GetPrefix() string {
|
||||
if x != nil {
|
||||
return x.Prefix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ApiKey) GetExpiration() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ApiKey) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ApiKey) GetLastSeen() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.LastSeen
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateApiKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateApiKeyRequest) Reset() {
|
||||
*x = CreateApiKeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateApiKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateApiKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreateApiKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateApiKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreateApiKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *CreateApiKeyRequest) GetExpiration() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateApiKeyResponse struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
}

func (x *CreateApiKeyResponse) Reset() {
    *x = CreateApiKeyResponse{}
    if protoimpl.UnsafeEnabled {
        mi := &file_headscale_v1_apikey_proto_msgTypes[2]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *CreateApiKeyResponse) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*CreateApiKeyResponse) ProtoMessage() {}

func (x *CreateApiKeyResponse) ProtoReflect() protoreflect.Message {
    mi := &file_headscale_v1_apikey_proto_msgTypes[2]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use CreateApiKeyResponse.ProtoReflect.Descriptor instead.
func (*CreateApiKeyResponse) Descriptor() ([]byte, []int) {
    return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{2}
}

func (x *CreateApiKeyResponse) GetApiKey() string {
    if x != nil {
        return x.ApiKey
    }
    return ""
}

type ExpireApiKeyRequest struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
}

func (x *ExpireApiKeyRequest) Reset() {
    *x = ExpireApiKeyRequest{}
    if protoimpl.UnsafeEnabled {
        mi := &file_headscale_v1_apikey_proto_msgTypes[3]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *ExpireApiKeyRequest) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*ExpireApiKeyRequest) ProtoMessage() {}

func (x *ExpireApiKeyRequest) ProtoReflect() protoreflect.Message {
    mi := &file_headscale_v1_apikey_proto_msgTypes[3]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use ExpireApiKeyRequest.ProtoReflect.Descriptor instead.
func (*ExpireApiKeyRequest) Descriptor() ([]byte, []int) {
    return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{3}
}

func (x *ExpireApiKeyRequest) GetPrefix() string {
    if x != nil {
        return x.Prefix
    }
    return ""
}

type ExpireApiKeyResponse struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields
}

func (x *ExpireApiKeyResponse) Reset() {
    *x = ExpireApiKeyResponse{}
    if protoimpl.UnsafeEnabled {
        mi := &file_headscale_v1_apikey_proto_msgTypes[4]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *ExpireApiKeyResponse) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*ExpireApiKeyResponse) ProtoMessage() {}

func (x *ExpireApiKeyResponse) ProtoReflect() protoreflect.Message {
    mi := &file_headscale_v1_apikey_proto_msgTypes[4]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use ExpireApiKeyResponse.ProtoReflect.Descriptor instead.
func (*ExpireApiKeyResponse) Descriptor() ([]byte, []int) {
    return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{4}
}

type ListApiKeysRequest struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields
}

func (x *ListApiKeysRequest) Reset() {
    *x = ListApiKeysRequest{}
    if protoimpl.UnsafeEnabled {
        mi := &file_headscale_v1_apikey_proto_msgTypes[5]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *ListApiKeysRequest) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*ListApiKeysRequest) ProtoMessage() {}

func (x *ListApiKeysRequest) ProtoReflect() protoreflect.Message {
    mi := &file_headscale_v1_apikey_proto_msgTypes[5]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use ListApiKeysRequest.ProtoReflect.Descriptor instead.
func (*ListApiKeysRequest) Descriptor() ([]byte, []int) {
    return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{5}
}

type ListApiKeysResponse struct {
    state         protoimpl.MessageState
    sizeCache     protoimpl.SizeCache
    unknownFields protoimpl.UnknownFields

    ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
}

func (x *ListApiKeysResponse) Reset() {
    *x = ListApiKeysResponse{}
    if protoimpl.UnsafeEnabled {
        mi := &file_headscale_v1_apikey_proto_msgTypes[6]
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        ms.StoreMessageInfo(mi)
    }
}

func (x *ListApiKeysResponse) String() string {
    return protoimpl.X.MessageStringOf(x)
}

func (*ListApiKeysResponse) ProtoMessage() {}

func (x *ListApiKeysResponse) ProtoReflect() protoreflect.Message {
    mi := &file_headscale_v1_apikey_proto_msgTypes[6]
    if protoimpl.UnsafeEnabled && x != nil {
        ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
        if ms.LoadMessageInfo() == nil {
            ms.StoreMessageInfo(mi)
        }
        return ms
    }
    return mi.MessageOf(x)
}

// Deprecated: Use ListApiKeysResponse.ProtoReflect.Descriptor instead.
func (*ListApiKeysResponse) Descriptor() ([]byte, []int) {
    return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{6}
}

func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
    if x != nil {
        return x.ApiKeys
    }
    return nil
}

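These request/response messages are what the CLI and the gRPC handlers exchange. A short sketch of building a CreateApiKeyRequest with a 90-day expiration; the duration, function name, and import path are illustrative assumptions:

// Sketch only: newCreateRequest and the 90-day lifetime are assumptions.
package example

import (
    "time"

    v1 "github.com/juanfont/headscale/gen/go/v1" // assumed import path
    "google.golang.org/protobuf/types/known/timestamppb"
)

func newCreateRequest() *v1.CreateApiKeyRequest {
    return &v1.CreateApiKeyRequest{
        Expiration: timestamppb.New(time.Now().Add(90 * 24 * time.Hour)),
    }
}
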
var File_headscale_v1_apikey_proto protoreflect.FileDescriptor

var file_headscale_v1_apikey_proto_rawDesc = []byte{
    0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61,
    0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
    0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
    0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x06, 0x41,
    0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
    0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
    0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3a, 0x0a,
    0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
    0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
    0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
    0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65,
    0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
    0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
    0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74,
    0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65,
    0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
    0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
    0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x22, 0x51, 0x0a,
    0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
    0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
    0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
    0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
    0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
    0x22, 0x2f, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
    0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f,
    0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65,
    0x79, 0x22, 0x2d, 0x0a, 0x13, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
    0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
    0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
    0x22, 0x16, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
    0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74,
    0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x46,
    0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
    0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79,
    0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
    0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x61,
    0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
    0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65,
    0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76,
    0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
    file_headscale_v1_apikey_proto_rawDescOnce sync.Once
    file_headscale_v1_apikey_proto_rawDescData = file_headscale_v1_apikey_proto_rawDesc
)

func file_headscale_v1_apikey_proto_rawDescGZIP() []byte {
    file_headscale_v1_apikey_proto_rawDescOnce.Do(func() {
        file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_apikey_proto_rawDescData)
    })
    return file_headscale_v1_apikey_proto_rawDescData
}

var file_headscale_v1_apikey_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_headscale_v1_apikey_proto_goTypes = []interface{}{
    (*ApiKey)(nil),                // 0: headscale.v1.ApiKey
    (*CreateApiKeyRequest)(nil),   // 1: headscale.v1.CreateApiKeyRequest
    (*CreateApiKeyResponse)(nil),  // 2: headscale.v1.CreateApiKeyResponse
    (*ExpireApiKeyRequest)(nil),   // 3: headscale.v1.ExpireApiKeyRequest
    (*ExpireApiKeyResponse)(nil),  // 4: headscale.v1.ExpireApiKeyResponse
    (*ListApiKeysRequest)(nil),    // 5: headscale.v1.ListApiKeysRequest
    (*ListApiKeysResponse)(nil),   // 6: headscale.v1.ListApiKeysResponse
    (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
}
var file_headscale_v1_apikey_proto_depIdxs = []int32{
    7, // 0: headscale.v1.ApiKey.expiration:type_name -> google.protobuf.Timestamp
    7, // 1: headscale.v1.ApiKey.created_at:type_name -> google.protobuf.Timestamp
    7, // 2: headscale.v1.ApiKey.last_seen:type_name -> google.protobuf.Timestamp
    7, // 3: headscale.v1.CreateApiKeyRequest.expiration:type_name -> google.protobuf.Timestamp
    0, // 4: headscale.v1.ListApiKeysResponse.api_keys:type_name -> headscale.v1.ApiKey
    5, // [5:5] is the sub-list for method output_type
    5, // [5:5] is the sub-list for method input_type
    5, // [5:5] is the sub-list for extension type_name
    5, // [5:5] is the sub-list for extension extendee
    0, // [0:5] is the sub-list for field type_name
}

func init() { file_headscale_v1_apikey_proto_init() }
func file_headscale_v1_apikey_proto_init() {
    if File_headscale_v1_apikey_proto != nil {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_headscale_v1_apikey_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ApiKey); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_apikey_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*CreateApiKeyRequest); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_apikey_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*CreateApiKeyResponse); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_apikey_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ExpireApiKeyRequest); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_apikey_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ExpireApiKeyResponse); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_apikey_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ListApiKeysRequest); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_headscale_v1_apikey_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ListApiKeysResponse); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_headscale_v1_apikey_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   7,
            NumExtensions: 0,
            NumServices:   0,
        },
        GoTypes:           file_headscale_v1_apikey_proto_goTypes,
        DependencyIndexes: file_headscale_v1_apikey_proto_depIdxs,
        MessageInfos:      file_headscale_v1_apikey_proto_msgTypes,
    }.Build()
    File_headscale_v1_apikey_proto = out.File
    file_headscale_v1_apikey_proto_rawDesc = nil
    file_headscale_v1_apikey_proto_goTypes = nil
    file_headscale_v1_apikey_proto_depIdxs = nil
}
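The struct tags in the messages above carry both a Go encoding/json name (e.g. created_at) and a proto3 JSON name (the json=createdAt part), and protojson follows the latter; uint64 ids also serialize as JSON strings. A hedged sketch of serializing a generated ApiKey; the field values and function name are illustrative assumptions:

// Sketch only: printKey and the sample values are assumptions.
package example

import (
    "fmt"
    "time"

    v1 "github.com/juanfont/headscale/gen/go/v1" // assumed import path
    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/timestamppb"
)

func printKey() error {
    key := &v1.ApiKey{
        Id:        1,
        Prefix:    "ak_example",
        CreatedAt: timestamppb.New(time.Now()),
    }
    out, err := protojson.Marshal(key) // uses the proto3 JSON names, e.g. createdAt
    if err != nil {
        return err
    }
    fmt.Println(string(out)) // e.g. {"id":"1","prefix":"ak_example","createdAt":"..."}
    return nil
}
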
1116  gen/go/headscale/v1/device.pb.go  Normal file
File diff suppressed because it is too large
340  gen/go/headscale/v1/headscale.pb.go  Normal file
@@ -0,0 +1,340 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.27.1
// 	protoc        (unknown)
// source: headscale/v1/headscale.proto

package v1

import (
    reflect "reflect"

    _ "google.golang.org/genproto/googleapis/api/annotations"
    protoreflect "google.golang.org/protobuf/reflect/protoreflect"
    protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)

const (
    // Verify that this generated code is sufficiently up-to-date.
    _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
    // Verify that runtime/protoimpl is sufficiently up-to-date.
    _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

var File_headscale_v1_headscale_proto protoreflect.FileDescriptor

var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65,
|
||||
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xcb, 0x15, 0x0a, 0x10, 0x48, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x77,
|
||||
0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
|
||||
0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
|
||||
0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x7c, 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x22,
|
||||
0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52,
|
||||
0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x22, 0x2e,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
|
||||
0x65, 0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x80,
|
||||
0x01, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x7d, 0x12, 0x76, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x12, 0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x10, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75,
|
||||
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x87, 0x01, 0x0a,
|
||||
0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x7a, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b,
|
||||
0x65, 0x79, 0x12, 0x89, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x27, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65,
|
||||
0x62, 0x75, 0x67, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x75,
|
||||
0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74,
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74,
|
||||
0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65,
|
||||
0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f,
|
||||
0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x7e, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x2a, 0x1c, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0d, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x23, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x12, 0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73,
|
||||
0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12,
|
||||
0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x12, 0x8d, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30,
|
||||
0x22, 0x2e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x73,
|
||||
0x68, 0x61, 0x72, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d,
|
||||
0x12, 0x95, 0x01, 0x0a, 0x0e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x12, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x22, 0x30, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x2f, 0x7b, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x12, 0x8b, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74,
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x24, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x28,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e,
|
||||
0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x23, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
|
||||
0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x22,
|
||||
0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x3a,
|
||||
0x01, 0x2a, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b,
|
||||
0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65,
|
||||
0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x6a, 0x0a, 0x0b, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70,
|
||||
0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f,
|
||||
0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_headscale_v1_headscale_proto_goTypes = []interface{}{
    (*GetNamespaceRequest)(nil),         // 0: headscale.v1.GetNamespaceRequest
    (*CreateNamespaceRequest)(nil),      // 1: headscale.v1.CreateNamespaceRequest
    (*RenameNamespaceRequest)(nil),      // 2: headscale.v1.RenameNamespaceRequest
    (*DeleteNamespaceRequest)(nil),      // 3: headscale.v1.DeleteNamespaceRequest
    (*ListNamespacesRequest)(nil),       // 4: headscale.v1.ListNamespacesRequest
    (*CreatePreAuthKeyRequest)(nil),     // 5: headscale.v1.CreatePreAuthKeyRequest
    (*ExpirePreAuthKeyRequest)(nil),     // 6: headscale.v1.ExpirePreAuthKeyRequest
    (*ListPreAuthKeysRequest)(nil),      // 7: headscale.v1.ListPreAuthKeysRequest
    (*DebugCreateMachineRequest)(nil),   // 8: headscale.v1.DebugCreateMachineRequest
    (*GetMachineRequest)(nil),           // 9: headscale.v1.GetMachineRequest
    (*RegisterMachineRequest)(nil),      // 10: headscale.v1.RegisterMachineRequest
    (*DeleteMachineRequest)(nil),        // 11: headscale.v1.DeleteMachineRequest
    (*ExpireMachineRequest)(nil),        // 12: headscale.v1.ExpireMachineRequest
    (*ListMachinesRequest)(nil),         // 13: headscale.v1.ListMachinesRequest
    (*ShareMachineRequest)(nil),         // 14: headscale.v1.ShareMachineRequest
    (*UnshareMachineRequest)(nil),       // 15: headscale.v1.UnshareMachineRequest
    (*GetMachineRouteRequest)(nil),      // 16: headscale.v1.GetMachineRouteRequest
    (*EnableMachineRoutesRequest)(nil),  // 17: headscale.v1.EnableMachineRoutesRequest
    (*CreateApiKeyRequest)(nil),         // 18: headscale.v1.CreateApiKeyRequest
    (*ExpireApiKeyRequest)(nil),         // 19: headscale.v1.ExpireApiKeyRequest
    (*ListApiKeysRequest)(nil),          // 20: headscale.v1.ListApiKeysRequest
    (*GetNamespaceResponse)(nil),        // 21: headscale.v1.GetNamespaceResponse
    (*CreateNamespaceResponse)(nil),     // 22: headscale.v1.CreateNamespaceResponse
    (*RenameNamespaceResponse)(nil),     // 23: headscale.v1.RenameNamespaceResponse
    (*DeleteNamespaceResponse)(nil),     // 24: headscale.v1.DeleteNamespaceResponse
    (*ListNamespacesResponse)(nil),      // 25: headscale.v1.ListNamespacesResponse
    (*CreatePreAuthKeyResponse)(nil),    // 26: headscale.v1.CreatePreAuthKeyResponse
    (*ExpirePreAuthKeyResponse)(nil),    // 27: headscale.v1.ExpirePreAuthKeyResponse
    (*ListPreAuthKeysResponse)(nil),     // 28: headscale.v1.ListPreAuthKeysResponse
    (*DebugCreateMachineResponse)(nil),  // 29: headscale.v1.DebugCreateMachineResponse
    (*GetMachineResponse)(nil),          // 30: headscale.v1.GetMachineResponse
    (*RegisterMachineResponse)(nil),     // 31: headscale.v1.RegisterMachineResponse
    (*DeleteMachineResponse)(nil),       // 32: headscale.v1.DeleteMachineResponse
    (*ExpireMachineResponse)(nil),       // 33: headscale.v1.ExpireMachineResponse
    (*ListMachinesResponse)(nil),        // 34: headscale.v1.ListMachinesResponse
    (*ShareMachineResponse)(nil),        // 35: headscale.v1.ShareMachineResponse
    (*UnshareMachineResponse)(nil),      // 36: headscale.v1.UnshareMachineResponse
    (*GetMachineRouteResponse)(nil),     // 37: headscale.v1.GetMachineRouteResponse
    (*EnableMachineRoutesResponse)(nil), // 38: headscale.v1.EnableMachineRoutesResponse
    (*CreateApiKeyResponse)(nil),        // 39: headscale.v1.CreateApiKeyResponse
    (*ExpireApiKeyResponse)(nil),        // 40: headscale.v1.ExpireApiKeyResponse
    (*ListApiKeysResponse)(nil),         // 41: headscale.v1.ListApiKeysResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
    0,  // 0: headscale.v1.HeadscaleService.GetNamespace:input_type -> headscale.v1.GetNamespaceRequest
    1,  // 1: headscale.v1.HeadscaleService.CreateNamespace:input_type -> headscale.v1.CreateNamespaceRequest
    2,  // 2: headscale.v1.HeadscaleService.RenameNamespace:input_type -> headscale.v1.RenameNamespaceRequest
    3,  // 3: headscale.v1.HeadscaleService.DeleteNamespace:input_type -> headscale.v1.DeleteNamespaceRequest
    4,  // 4: headscale.v1.HeadscaleService.ListNamespaces:input_type -> headscale.v1.ListNamespacesRequest
    5,  // 5: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
    6,  // 6: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
    7,  // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
    8,  // 8: headscale.v1.HeadscaleService.DebugCreateMachine:input_type -> headscale.v1.DebugCreateMachineRequest
    9,  // 9: headscale.v1.HeadscaleService.GetMachine:input_type -> headscale.v1.GetMachineRequest
    10, // 10: headscale.v1.HeadscaleService.RegisterMachine:input_type -> headscale.v1.RegisterMachineRequest
    11, // 11: headscale.v1.HeadscaleService.DeleteMachine:input_type -> headscale.v1.DeleteMachineRequest
    12, // 12: headscale.v1.HeadscaleService.ExpireMachine:input_type -> headscale.v1.ExpireMachineRequest
    13, // 13: headscale.v1.HeadscaleService.ListMachines:input_type -> headscale.v1.ListMachinesRequest
    14, // 14: headscale.v1.HeadscaleService.ShareMachine:input_type -> headscale.v1.ShareMachineRequest
    15, // 15: headscale.v1.HeadscaleService.UnshareMachine:input_type -> headscale.v1.UnshareMachineRequest
    16, // 16: headscale.v1.HeadscaleService.GetMachineRoute:input_type -> headscale.v1.GetMachineRouteRequest
    17, // 17: headscale.v1.HeadscaleService.EnableMachineRoutes:input_type -> headscale.v1.EnableMachineRoutesRequest
    18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
    19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
    20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
    21, // 21: headscale.v1.HeadscaleService.GetNamespace:output_type -> headscale.v1.GetNamespaceResponse
    22, // 22: headscale.v1.HeadscaleService.CreateNamespace:output_type -> headscale.v1.CreateNamespaceResponse
    23, // 23: headscale.v1.HeadscaleService.RenameNamespace:output_type -> headscale.v1.RenameNamespaceResponse
    24, // 24: headscale.v1.HeadscaleService.DeleteNamespace:output_type -> headscale.v1.DeleteNamespaceResponse
    25, // 25: headscale.v1.HeadscaleService.ListNamespaces:output_type -> headscale.v1.ListNamespacesResponse
    26, // 26: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
    27, // 27: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
    28, // 28: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
    29, // 29: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
    30, // 30: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
    31, // 31: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
    32, // 32: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
    33, // 33: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
    34, // 34: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
    35, // 35: headscale.v1.HeadscaleService.ShareMachine:output_type -> headscale.v1.ShareMachineResponse
    36, // 36: headscale.v1.HeadscaleService.UnshareMachine:output_type -> headscale.v1.UnshareMachineResponse
    37, // 37: headscale.v1.HeadscaleService.GetMachineRoute:output_type -> headscale.v1.GetMachineRouteResponse
    38, // 38: headscale.v1.HeadscaleService.EnableMachineRoutes:output_type -> headscale.v1.EnableMachineRoutesResponse
    39, // 39: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
    40, // 40: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
    41, // 41: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
    21, // [21:42] is the sub-list for method output_type
    0,  // [0:21] is the sub-list for method input_type
    0,  // [0:0] is the sub-list for extension type_name
    0,  // [0:0] is the sub-list for extension extendee
    0,  // [0:0] is the sub-list for field type_name
}

func init() { file_headscale_v1_headscale_proto_init() }
func file_headscale_v1_headscale_proto_init() {
    if File_headscale_v1_headscale_proto != nil {
        return
    }
    file_headscale_v1_namespace_proto_init()
    file_headscale_v1_preauthkey_proto_init()
    file_headscale_v1_machine_proto_init()
    file_headscale_v1_routes_proto_init()
    file_headscale_v1_apikey_proto_init()
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_headscale_v1_headscale_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   0,
            NumExtensions: 0,
            NumServices:   1,
        },
        GoTypes:           file_headscale_v1_headscale_proto_goTypes,
        DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
    }.Build()
    File_headscale_v1_headscale_proto = out.File
    file_headscale_v1_headscale_proto_rawDesc = nil
    file_headscale_v1_headscale_proto_goTypes = nil
    file_headscale_v1_headscale_proto_depIdxs = nil
}
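Once this init has run (it fires on import of the generated package), the HeadscaleService descriptor is available through the global protobuf registry. A hedged sketch of enumerating the service's methods via reflection; the blank import path is an assumption and only exists to trigger the registration above:

// Sketch only: listMethods and the import path are assumptions.
package example

import (
    "fmt"

    _ "github.com/juanfont/headscale/gen/go/v1" // assumed import path; registers the descriptors
    "google.golang.org/protobuf/reflect/protoreflect"
    "google.golang.org/protobuf/reflect/protoregistry"
)

func listMethods() error {
    desc, err := protoregistry.GlobalFiles.FindDescriptorByName("headscale.v1.HeadscaleService")
    if err != nil {
        return err
    }
    svc := desc.(protoreflect.ServiceDescriptor)
    for i := 0; i < svc.Methods().Len(); i++ {
        fmt.Println(svc.Methods().Get(i).FullName()) // e.g. headscale.v1.HeadscaleService.CreateApiKey
    }
    return nil
}
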
2019  gen/go/headscale/v1/headscale.pb.gw.go  Normal file
File diff suppressed because it is too large
832  gen/go/headscale/v1/headscale_grpc.pb.go  Normal file
@@ -0,0 +1,832 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.

package v1

import (
    context "context"

    grpc "google.golang.org/grpc"
    codes "google.golang.org/grpc/codes"
    status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// HeadscaleServiceClient is the client API for HeadscaleService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type HeadscaleServiceClient interface {
    // --- Namespace start ---
    GetNamespace(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error)
    CreateNamespace(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error)
    RenameNamespace(ctx context.Context, in *RenameNamespaceRequest, opts ...grpc.CallOption) (*RenameNamespaceResponse, error)
    DeleteNamespace(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*DeleteNamespaceResponse, error)
    ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error)
    // --- PreAuthKeys start ---
    CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)
    ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)
    ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)
    // --- Machine start ---
    DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error)
    GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error)
    RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error)
    DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error)
    ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error)
    ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error)
    ShareMachine(ctx context.Context, in *ShareMachineRequest, opts ...grpc.CallOption) (*ShareMachineResponse, error)
    UnshareMachine(ctx context.Context, in *UnshareMachineRequest, opts ...grpc.CallOption) (*UnshareMachineResponse, error)
    // --- Route start ---
    GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error)
    EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error)
    // --- ApiKeys start ---
    CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
    ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
    ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error)
}

type headscaleServiceClient struct {
    cc grpc.ClientConnInterface
}

func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClient {
    return &headscaleServiceClient{cc}
}

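A hedged sketch of using this generated client: dial the headscale gRPC endpoint and list API keys. The address, the insecure dial option, and the absence of authentication are illustrative assumptions; a real deployment requires credentials (for example an API key or the local unix socket):

// Sketch only: address, dial options, and function name are assumptions.
package example

import (
    "context"
    "fmt"
    "time"

    v1 "github.com/juanfont/headscale/gen/go/v1" // assumed import path
    "google.golang.org/grpc"
)

func listKeys() error {
    conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithInsecure(), grpc.WithBlock())
    if err != nil {
        return err
    }
    defer conn.Close()

    client := v1.NewHeadscaleServiceClient(conn)
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    resp, err := client.ListApiKeys(ctx, &v1.ListApiKeysRequest{})
    if err != nil {
        return err
    }
    for _, key := range resp.GetApiKeys() {
        fmt.Println(key.GetPrefix())
    }
    return nil
}
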
func (c *headscaleServiceClient) GetNamespace(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
|
||||
out := new(GetNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateNamespace(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
|
||||
out := new(CreateNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreateNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RenameNamespace(ctx context.Context, in *RenameNamespaceRequest, opts ...grpc.CallOption) (*RenameNamespaceResponse, error) {
|
||||
out := new(RenameNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RenameNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteNamespace(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*DeleteNamespaceResponse, error) {
|
||||
out := new(DeleteNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
|
||||
out := new(ListNamespacesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListNamespaces", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
|
||||
out := new(CreatePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreatePreAuthKey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
|
||||
out := new(ExpirePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpirePreAuthKey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListPreAuthKeys", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error) {
|
||||
out := new(DebugCreateMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DebugCreateMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error) {
|
||||
out := new(GetMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error) {
|
||||
out := new(RegisterMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RegisterMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error) {
|
||||
out := new(DeleteMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error) {
|
||||
out := new(ExpireMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpireMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error) {
|
||||
out := new(ListMachinesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListMachines", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ShareMachine(ctx context.Context, in *ShareMachineRequest, opts ...grpc.CallOption) (*ShareMachineResponse, error) {
|
||||
out := new(ShareMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ShareMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) UnshareMachine(ctx context.Context, in *UnshareMachineRequest, opts ...grpc.CallOption) (*UnshareMachineResponse, error) {
|
||||
out := new(UnshareMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/UnshareMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error) {
|
||||
out := new(GetMachineRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachineRoute", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error) {
|
||||
out := new(EnableMachineRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/EnableMachineRoutes", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
|
||||
out := new(CreateApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreateApiKey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {
|
||||
out := new(ExpireApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpireApiKey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {
|
||||
out := new(ListApiKeysResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListApiKeys", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}

// HeadscaleServiceServer is the server API for HeadscaleService service.
// All implementations must embed UnimplementedHeadscaleServiceServer
// for forward compatibility
type HeadscaleServiceServer interface {
	// --- Namespace start ---
	GetNamespace(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error)
	CreateNamespace(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
	RenameNamespace(context.Context, *RenameNamespaceRequest) (*RenameNamespaceResponse, error)
	DeleteNamespace(context.Context, *DeleteNamespaceRequest) (*DeleteNamespaceResponse, error)
	ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error)
	// --- PreAuthKeys start ---
	CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)
	ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)
	ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)
	// --- Machine start ---
	DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error)
	GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error)
	RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error)
	DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error)
	ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error)
	ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error)
	ShareMachine(context.Context, *ShareMachineRequest) (*ShareMachineResponse, error)
	UnshareMachine(context.Context, *UnshareMachineRequest) (*UnshareMachineResponse, error)
	// --- Route start ---
	GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error)
	EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error)
	// --- ApiKeys start ---
	CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
	ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
	ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error)
	mustEmbedUnimplementedHeadscaleServiceServer()
}

// UnimplementedHeadscaleServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedHeadscaleServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedHeadscaleServiceServer) GetNamespace(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateNamespace(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RenameNamespace(context.Context, *RenameNamespaceRequest) (*RenameNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteNamespace(context.Context, *DeleteNamespaceRequest) (*DeleteNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListNamespaces not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreatePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DebugCreateMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListMachines not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ShareMachine(context.Context, *ShareMachineRequest) (*ShareMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ShareMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) UnshareMachine(context.Context, *UnshareMachineRequest) (*UnshareMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UnshareMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachineRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method EnableMachineRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireApiKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListApiKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
|
||||
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeHeadscaleServiceServer interface {
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
|
||||
s.RegisterService(&HeadscaleService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetNamespace(ctx, req.(*GetNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreateNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).CreateNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreateNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreateNamespace(ctx, req.(*CreateNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RenameNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RenameNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).RenameNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RenameNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RenameNamespace(ctx, req.(*RenameNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteNamespace(ctx, req.(*DeleteNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListNamespaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListNamespacesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListNamespaces(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListNamespaces",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListNamespaces(ctx, req.(*ListNamespacesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreatePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreatePreAuthKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreatePreAuthKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, req.(*CreatePreAuthKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpirePreAuthKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpirePreAuthKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, req.(*ExpirePreAuthKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListPreAuthKeysRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListPreAuthKeys",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, req.(*ListPreAuthKeysRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DebugCreateMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DebugCreateMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DebugCreateMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DebugCreateMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DebugCreateMachine(ctx, req.(*DebugCreateMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachine(ctx, req.(*GetMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RegisterMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).RegisterMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RegisterMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RegisterMachine(ctx, req.(*RegisterMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteMachine(ctx, req.(*DeleteMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ExpireMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpireMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ExpireMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpireMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpireMachine(ctx, req.(*ExpireMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListMachines_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListMachinesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListMachines(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListMachines",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListMachines(ctx, req.(*ListMachinesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ShareMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ShareMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ShareMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ShareMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ShareMachine(ctx, req.(*ShareMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_UnshareMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UnshareMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).UnshareMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/UnshareMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).UnshareMachine(ctx, req.(*UnshareMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachineRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachineRoute",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoute(ctx, req.(*GetMachineRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_EnableMachineRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EnableMachineRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).EnableMachineRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/EnableMachineRoutes",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).EnableMachineRoutes(ctx, req.(*EnableMachineRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateApiKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).CreateApiKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreateApiKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreateApiKey(ctx, req.(*CreateApiKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ExpireApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpireApiKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ExpireApiKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpireApiKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpireApiKey(ctx, req.(*ExpireApiKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListApiKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListApiKeysRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListApiKeys(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListApiKeys",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListApiKeys(ctx, req.(*ListApiKeysRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}

// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "headscale.v1.HeadscaleService",
	HandlerType: (*HeadscaleServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetNamespace",
			Handler:    _HeadscaleService_GetNamespace_Handler,
		},
		{
			MethodName: "CreateNamespace",
			Handler:    _HeadscaleService_CreateNamespace_Handler,
		},
		{
			MethodName: "RenameNamespace",
			Handler:    _HeadscaleService_RenameNamespace_Handler,
		},
		{
			MethodName: "DeleteNamespace",
			Handler:    _HeadscaleService_DeleteNamespace_Handler,
		},
		{
			MethodName: "ListNamespaces",
			Handler:    _HeadscaleService_ListNamespaces_Handler,
		},
		{
			MethodName: "CreatePreAuthKey",
			Handler:    _HeadscaleService_CreatePreAuthKey_Handler,
		},
		{
			MethodName: "ExpirePreAuthKey",
			Handler:    _HeadscaleService_ExpirePreAuthKey_Handler,
		},
		{
			MethodName: "ListPreAuthKeys",
			Handler:    _HeadscaleService_ListPreAuthKeys_Handler,
		},
		{
			MethodName: "DebugCreateMachine",
			Handler:    _HeadscaleService_DebugCreateMachine_Handler,
		},
		{
			MethodName: "GetMachine",
			Handler:    _HeadscaleService_GetMachine_Handler,
		},
		{
			MethodName: "RegisterMachine",
			Handler:    _HeadscaleService_RegisterMachine_Handler,
		},
		{
			MethodName: "DeleteMachine",
			Handler:    _HeadscaleService_DeleteMachine_Handler,
		},
		{
			MethodName: "ExpireMachine",
			Handler:    _HeadscaleService_ExpireMachine_Handler,
		},
		{
			MethodName: "ListMachines",
			Handler:    _HeadscaleService_ListMachines_Handler,
		},
		{
			MethodName: "ShareMachine",
			Handler:    _HeadscaleService_ShareMachine_Handler,
		},
		{
			MethodName: "UnshareMachine",
			Handler:    _HeadscaleService_UnshareMachine_Handler,
		},
		{
			MethodName: "GetMachineRoute",
			Handler:    _HeadscaleService_GetMachineRoute_Handler,
		},
		{
			MethodName: "EnableMachineRoutes",
			Handler:    _HeadscaleService_EnableMachineRoutes_Handler,
		},
		{
			MethodName: "CreateApiKey",
			Handler:    _HeadscaleService_CreateApiKey_Handler,
		},
		{
			MethodName: "ExpireApiKey",
			Handler:    _HeadscaleService_ExpireApiKey_Handler,
		},
		{
			MethodName: "ListApiKeys",
			Handler:    _HeadscaleService_ListApiKeys_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "headscale/v1/headscale.proto",
}
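The generated bindings above are consumed like any other protoc-gen-go-grpc output: a caller wraps a *grpc.ClientConn with the generated constructor (NewHeadscaleServiceClient, emitted alongside the headscaleServiceClient shown earlier), while a server embeds UnimplementedHeadscaleServiceServer, overrides the RPCs it supports, and registers itself with RegisterHeadscaleServiceServer. The sketch below is not part of the diff; it is a minimal illustration of that wiring. The import path github.com/juanfont/headscale/gen/go/v1, the alias v1, the loopback address 127.0.0.1:50443, the insecure transport, and the sample namespace values are assumptions made only for this example.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"google.golang.org/grpc"

	v1 "github.com/juanfont/headscale/gen/go/v1" // assumed import path for the generated package
)

// exampleServer embeds UnimplementedHeadscaleServiceServer so it satisfies
// mustEmbedUnimplementedHeadscaleServiceServer() and keeps compiling when new
// RPCs are added to HeadscaleServiceServer.
type exampleServer struct {
	v1.UnimplementedHeadscaleServiceServer
}

// ListNamespaces overrides one stub; every other RPC falls through to the
// embedded implementation and returns an Unimplemented error.
func (s *exampleServer) ListNamespaces(ctx context.Context, req *v1.ListNamespacesRequest) (*v1.ListNamespacesResponse, error) {
	return &v1.ListNamespacesResponse{
		Namespaces: []*v1.Namespace{{Id: "1", Name: "default"}}, // example data only
	}, nil
}

func main() {
	// Server side: register the implementation against the generated ServiceDesc.
	lis, err := net.Listen("tcp", "127.0.0.1:50443") // assumed address
	if err != nil {
		log.Fatal(err)
	}
	grpcServer := grpc.NewServer()
	v1.RegisterHeadscaleServiceServer(grpcServer, &exampleServer{})
	go grpcServer.Serve(lis)

	// Client side: wrap a ClientConn with the generated client and call an RPC.
	conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithInsecure(), grpc.WithBlock()) // insecure transport for the sketch only
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)
	resp, err := client.ListNamespaces(context.Background(), &v1.ListNamespacesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, ns := range resp.GetNamespaces() {
		fmt.Println(ns.GetName())
	}

	grpcServer.Stop()
}
```

Embedding UnimplementedHeadscaleServiceServer is what the "forward compatibility" comment above refers to: adding a new RPC to the proto surfaces as an Unimplemented error at runtime rather than a compile failure for existing servers. A real deployment would replace WithInsecure and the loopback address with TLS and whatever authentication the server expects.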
1446	gen/go/headscale/v1/machine.pb.go (normal file)
File diff suppressed because it is too large.

802	gen/go/headscale/v1/namespace.pb.go (normal file)
@@ -0,0 +1,802 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.27.1
// 	protoc        (unknown)
// source: headscale/v1/namespace.proto

package v1

import (
	reflect "reflect"
	sync "sync"

	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

type Namespace struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Namespace) Reset() {
|
||||
*x = Namespace{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Namespace) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Namespace) ProtoMessage() {}
|
||||
|
||||
func (x *Namespace) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Namespace.ProtoReflect.Descriptor instead.
|
||||
func (*Namespace) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Namespace) GetId() string {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Namespace) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Namespace) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNamespaceRequest) Reset() {
|
||||
*x = GetNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNamespaceResponse) Reset() {
|
||||
*x = GetNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceResponse) GetNamespace() *Namespace {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceRequest) Reset() {
|
||||
*x = CreateNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreateNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type CreateNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceResponse) Reset() {
|
||||
*x = CreateNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CreateNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceResponse) GetNamespace() *Namespace {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RenameNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
OldName string `protobuf:"bytes,1,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) Reset() {
|
||||
*x = RenameNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RenameNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *RenameNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RenameNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*RenameNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) GetOldName() string {
|
||||
if x != nil {
|
||||
return x.OldName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) GetNewName() string {
|
||||
if x != nil {
|
||||
return x.NewName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RenameNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceResponse) Reset() {
|
||||
*x = RenameNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RenameNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *RenameNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RenameNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*RenameNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceResponse) GetNamespace() *Namespace {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeleteNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceRequest) Reset() {
|
||||
*x = DeleteNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DeleteNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceResponse) Reset() {
|
||||
*x = DeleteNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
type ListNamespacesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ListNamespacesRequest) Reset() {
|
||||
*x = ListNamespacesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListNamespacesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListNamespacesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListNamespacesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListNamespacesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListNamespacesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
type ListNamespacesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespaces []*Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListNamespacesResponse) Reset() {
|
||||
*x = ListNamespacesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListNamespacesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListNamespacesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListNamespacesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[10]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListNamespacesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListNamespacesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *ListNamespacesResponse) GetNamespaces() []*Namespace {
|
||||
if x != nil {
|
||||
return x.Namespaces
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_namespace_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_namespace_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a,
|
||||
0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39,
|
||||
0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
|
||||
0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74,
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x22, 0x50, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x22, 0x4e, 0x0a, 0x16, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a,
|
||||
0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x17, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17,
|
||||
0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x51, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x37, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18,
|
||||
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e,
|
||||
0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f,
|
||||
0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_namespace_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_namespace_proto_rawDescData = file_headscale_v1_namespace_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_namespace_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_namespace_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_namespace_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_namespace_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_headscale_v1_namespace_proto_goTypes = []interface{}{
	(*Namespace)(nil),               // 0: headscale.v1.Namespace
	(*GetNamespaceRequest)(nil),     // 1: headscale.v1.GetNamespaceRequest
	(*GetNamespaceResponse)(nil),    // 2: headscale.v1.GetNamespaceResponse
	(*CreateNamespaceRequest)(nil),  // 3: headscale.v1.CreateNamespaceRequest
	(*CreateNamespaceResponse)(nil), // 4: headscale.v1.CreateNamespaceResponse
	(*RenameNamespaceRequest)(nil),  // 5: headscale.v1.RenameNamespaceRequest
	(*RenameNamespaceResponse)(nil), // 6: headscale.v1.RenameNamespaceResponse
	(*DeleteNamespaceRequest)(nil),  // 7: headscale.v1.DeleteNamespaceRequest
	(*DeleteNamespaceResponse)(nil), // 8: headscale.v1.DeleteNamespaceResponse
	(*ListNamespacesRequest)(nil),   // 9: headscale.v1.ListNamespacesRequest
	(*ListNamespacesResponse)(nil),  // 10: headscale.v1.ListNamespacesResponse
	(*timestamppb.Timestamp)(nil),   // 11: google.protobuf.Timestamp
}
var file_headscale_v1_namespace_proto_depIdxs = []int32{
	11, // 0: headscale.v1.Namespace.created_at:type_name -> google.protobuf.Timestamp
	0,  // 1: headscale.v1.GetNamespaceResponse.namespace:type_name -> headscale.v1.Namespace
	0,  // 2: headscale.v1.CreateNamespaceResponse.namespace:type_name -> headscale.v1.Namespace
	0,  // 3: headscale.v1.RenameNamespaceResponse.namespace:type_name -> headscale.v1.Namespace
	0,  // 4: headscale.v1.ListNamespacesResponse.namespaces:type_name -> headscale.v1.Namespace
	5,  // [5:5] is the sub-list for method output_type
	5,  // [5:5] is the sub-list for method input_type
	5,  // [5:5] is the sub-list for extension type_name
	5,  // [5:5] is the sub-list for extension extendee
	0,  // [0:5] is the sub-list for field type_name
}

func init() { file_headscale_v1_namespace_proto_init() }
|
||||
func file_headscale_v1_namespace_proto_init() {
|
||||
if File_headscale_v1_namespace_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Namespace); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreateNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreateNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RenameNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RenameNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListNamespacesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListNamespacesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_namespace_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 11,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_namespace_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_namespace_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_namespace_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_namespace_proto = out.File
|
||||
file_headscale_v1_namespace_proto_rawDesc = nil
|
||||
file_headscale_v1_namespace_proto_goTypes = nil
|
||||
file_headscale_v1_namespace_proto_depIdxs = nil
|
||||
}
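With the descriptor tables and init wiring above in place, callers only touch the generated structs and their accessors. A minimal sketch of consuming the namespace messages; the response literal is fabricated here purely to exercise the getters (in practice it would come from the ListNamespaces RPC in the separately generated gRPC stubs), and the v1 import alias assumes the go_package shown in the descriptor:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"

	v1 "github.com/juanfont/headscale/gen/go/v1"
)

func main() {
	// A response as it might come back from ListNamespaces; built inline
	// here only to show the generated accessors.
	resp := &v1.ListNamespacesResponse{
		Namespaces: []*v1.Namespace{
			{CreatedAt: timestamppb.Now()},
		},
	}

	for _, ns := range resp.GetNamespaces() {
		// created_at maps to google.protobuf.Timestamp (depIdxs entry 0 above).
		fmt.Println(ns.GetCreatedAt().AsTime())
	}
}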
gen/go/headscale/v1/preauthkey.pb.go (new file, 641 lines)
@@ -0,0 +1,641 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/preauthkey.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PreAuthKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) Reset() {
|
||||
*x = PreAuthKey{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PreAuthKey) ProtoMessage() {}
|
||||
|
||||
func (x *PreAuthKey) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PreAuthKey.ProtoReflect.Descriptor instead.
|
||||
func (*PreAuthKey) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetId() string {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetKey() string {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetReusable() bool {
|
||||
if x != nil {
|
||||
return x.Reusable
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetEphemeral() bool {
|
||||
if x != nil {
|
||||
return x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetUsed() bool {
|
||||
if x != nil {
|
||||
return x.Used
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetExpiration() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) Reset() {
|
||||
*x = CreatePreAuthKeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreatePreAuthKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreatePreAuthKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetReusable() bool {
|
||||
if x != nil {
|
||||
return x.Reusable
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetEphemeral() bool {
|
||||
if x != nil {
|
||||
return x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) Reset() {
|
||||
*x = CreatePreAuthKeyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreatePreAuthKeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreatePreAuthKeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CreatePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {
|
||||
if x != nil {
|
||||
return x.PreAuthKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) Reset() {
|
||||
*x = ExpirePreAuthKeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExpirePreAuthKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExpirePreAuthKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetKey() string {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) Reset() {
|
||||
*x = ExpirePreAuthKeyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExpirePreAuthKeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExpirePreAuthKeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
type ListPreAuthKeysRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) Reset() {
|
||||
*x = ListPreAuthKeysRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListPreAuthKeysRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListPreAuthKeysRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ListPreAuthKeysResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) Reset() {
|
||||
*x = ListPreAuthKeysResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListPreAuthKeysResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListPreAuthKeysResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListPreAuthKeysResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
|
||||
if x != nil {
|
||||
return x.PreAuthKeys
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x0a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91,
|
||||
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
|
||||
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b,
|
||||
0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1a, 0x0a,
|
||||
0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
|
||||
0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68,
|
||||
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70,
|
||||
0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18,
|
||||
0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70,
|
||||
0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x41, 0x74, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65,
|
||||
0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68,
|
||||
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a,
|
||||
0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a,
|
||||
0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, 0x17, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75,
|
||||
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}

var (
	file_headscale_v1_preauthkey_proto_rawDescOnce sync.Once
	file_headscale_v1_preauthkey_proto_rawDescData = file_headscale_v1_preauthkey_proto_rawDesc
)

func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
	file_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {
		file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_preauthkey_proto_rawDescData)
	})
	return file_headscale_v1_preauthkey_proto_rawDescData
}

var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
var file_headscale_v1_preauthkey_proto_goTypes = []interface{}{
	(*PreAuthKey)(nil),               // 0: headscale.v1.PreAuthKey
	(*CreatePreAuthKeyRequest)(nil),  // 1: headscale.v1.CreatePreAuthKeyRequest
	(*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse
	(*ExpirePreAuthKeyRequest)(nil),  // 3: headscale.v1.ExpirePreAuthKeyRequest
	(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
	(*ListPreAuthKeysRequest)(nil),   // 5: headscale.v1.ListPreAuthKeysRequest
	(*ListPreAuthKeysResponse)(nil),  // 6: headscale.v1.ListPreAuthKeysResponse
	(*timestamppb.Timestamp)(nil),    // 7: google.protobuf.Timestamp
}
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
	7, // 0: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
	7, // 1: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
	7, // 2: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
	0, // 3: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
	0, // 4: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
	5, // [5:5] is the sub-list for method output_type
	5, // [5:5] is the sub-list for method input_type
	5, // [5:5] is the sub-list for extension type_name
	5, // [5:5] is the sub-list for extension extendee
	0, // [0:5] is the sub-list for field type_name
}

func init() { file_headscale_v1_preauthkey_proto_init() }
|
||||
func file_headscale_v1_preauthkey_proto_init() {
|
||||
if File_headscale_v1_preauthkey_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PreAuthKey); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreatePreAuthKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreatePreAuthKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExpirePreAuthKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExpirePreAuthKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListPreAuthKeysRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListPreAuthKeysResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_preauthkey_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 7,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_preauthkey_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_preauthkey_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_preauthkey_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_preauthkey_proto = out.File
|
||||
file_headscale_v1_preauthkey_proto_rawDesc = nil
|
||||
file_headscale_v1_preauthkey_proto_goTypes = nil
|
||||
file_headscale_v1_preauthkey_proto_depIdxs = nil
|
||||
}
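The preauthkey messages above are plain data carriers; the RPCs that move them live in the separately generated service stubs. A minimal sketch of building a CreatePreAuthKeyRequest and reading the response through the generated accessors; the response is faked inline (it would normally come back from the CreatePreAuthKey RPC), and the "default" namespace and key string are placeholders:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"

	v1 "github.com/juanfont/headscale/gen/go/v1"
)

func main() {
	// Request a single-use, non-ephemeral key that expires in 24 hours.
	req := &v1.CreatePreAuthKeyRequest{
		Namespace:  "default", // placeholder namespace name
		Reusable:   false,
		Ephemeral:  false,
		Expiration: timestamppb.New(time.Now().Add(24 * time.Hour)),
	}

	// Faked response, just to exercise the generated accessors; in real code
	// it would come back from the CreatePreAuthKey RPC.
	resp := &v1.CreatePreAuthKeyResponse{
		PreAuthKey: &v1.PreAuthKey{
			Namespace:  req.GetNamespace(),
			Key:        "example-key", // placeholder key value
			Reusable:   req.GetReusable(),
			Expiration: req.GetExpiration(),
		},
	}

	key := resp.GetPreAuthKey()
	fmt.Printf("key %q for namespace %q expires %s\n",
		key.GetKey(), key.GetNamespace(), key.GetExpiration().AsTime())
}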
gen/go/headscale/v1/routes.pb.go (new file, 425 lines)
@@ -0,0 +1,425 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/routes.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Routes struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
AdvertisedRoutes []string `protobuf:"bytes,1,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
EnabledRoutes []string `protobuf:"bytes,2,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Routes) Reset() {
|
||||
*x = Routes{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Routes) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Routes) ProtoMessage() {}
|
||||
|
||||
func (x *Routes) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Routes.ProtoReflect.Descriptor instead.
|
||||
func (*Routes) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Routes) GetAdvertisedRoutes() []string {
|
||||
if x != nil {
|
||||
return x.AdvertisedRoutes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Routes) GetEnabledRoutes() []string {
|
||||
if x != nil {
|
||||
return x.EnabledRoutes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetMachineRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) Reset() {
|
||||
*x = GetMachineRouteRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetMachineRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes *Routes `protobuf:"bytes,1,opt,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) Reset() {
|
||||
*x = GetMachineRouteResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) GetRoutes() *Routes {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableMachineRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) Reset() {
|
||||
*x = EnableMachineRoutesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableMachineRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableMachineRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EnableMachineRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) GetRoutes() []string {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableMachineRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes *Routes `protobuf:"bytes,1,opt,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) Reset() {
|
||||
*x = EnableMachineRoutesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableMachineRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableMachineRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EnableMachineRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) GetRoutes() *Routes {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_routes_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_routes_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x5c, 0x0a, 0x06, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65,
|
||||
0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
|
||||
0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
|
||||
0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64,
|
||||
0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x1a, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x4b,
|
||||
0x0a, 0x1b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a,
|
||||
0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f,
|
||||
0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e,
|
||||
0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}

var (
	file_headscale_v1_routes_proto_rawDescOnce sync.Once
	file_headscale_v1_routes_proto_rawDescData = file_headscale_v1_routes_proto_rawDesc
)

func file_headscale_v1_routes_proto_rawDescGZIP() []byte {
	file_headscale_v1_routes_proto_rawDescOnce.Do(func() {
		file_headscale_v1_routes_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_routes_proto_rawDescData)
	})
	return file_headscale_v1_routes_proto_rawDescData
}

var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_headscale_v1_routes_proto_goTypes = []interface{}{
	(*Routes)(nil),                      // 0: headscale.v1.Routes
	(*GetMachineRouteRequest)(nil),      // 1: headscale.v1.GetMachineRouteRequest
	(*GetMachineRouteResponse)(nil),     // 2: headscale.v1.GetMachineRouteResponse
	(*EnableMachineRoutesRequest)(nil),  // 3: headscale.v1.EnableMachineRoutesRequest
	(*EnableMachineRoutesResponse)(nil), // 4: headscale.v1.EnableMachineRoutesResponse
}
var file_headscale_v1_routes_proto_depIdxs = []int32{
	0, // 0: headscale.v1.GetMachineRouteResponse.routes:type_name -> headscale.v1.Routes
	0, // 1: headscale.v1.EnableMachineRoutesResponse.routes:type_name -> headscale.v1.Routes
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}

func init() { file_headscale_v1_routes_proto_init() }
|
||||
func file_headscale_v1_routes_proto_init() {
|
||||
if File_headscale_v1_routes_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_routes_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Routes); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRouteRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRouteResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnableMachineRoutesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnableMachineRoutesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_routes_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_routes_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_routes_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_routes_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_routes_proto = out.File
|
||||
file_headscale_v1_routes_proto_rawDesc = nil
|
||||
file_headscale_v1_routes_proto_goTypes = nil
|
||||
file_headscale_v1_routes_proto_depIdxs = nil
|
||||
}
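The routes messages follow the same shape: a request keyed by machine_id plus a Routes payload carrying the advertised and enabled sets. A minimal sketch using the generated accessors; the response is again faked inline rather than fetched over the (separately generated) gRPC service, and the CIDR strings are placeholders:

package main

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/v1"
)

func main() {
	// Ask to enable a subset of the routes machine 1 advertises.
	req := &v1.EnableMachineRoutesRequest{
		MachineId: 1,
		Routes:    []string{"10.0.0.0/24"}, // placeholder CIDR
	}

	// Faked response, only to show the generated accessors; in real code it
	// would come back from the route-enabling RPC.
	resp := &v1.EnableMachineRoutesResponse{
		Routes: &v1.Routes{
			AdvertisedRoutes: []string{"10.0.0.0/24", "192.168.1.0/24"}, // placeholders
			EnabledRoutes:    req.GetRoutes(),
		},
	}

	fmt.Println("advertised:", resp.GetRoutes().GetAdvertisedRoutes())
	fmt.Println("enabled:   ", resp.GetRoutes().GetEnabledRoutes())
}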
gen/openapiv2/headscale/v1/apikey.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/apikey.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
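The swagger stubs above only carry the shared protobufAny and rpcStatus definitions; rpcStatus is the JSON body shape that grpc-gateway style HTTP endpoints return on error. A minimal sketch of decoding such an error body in Go; the struct name and the sample payload are illustrative assumptions, not part of the generated code:

package main

import (
	"encoding/json"
	"fmt"
)

// rpcStatus mirrors the "rpcStatus" definition in the swagger files above:
// an integer code, a message, and a list of protobufAny details.
type rpcStatus struct {
	Code    int32             `json:"code"`
	Message string            `json:"message"`
	Details []json.RawMessage `json:"details"`
}

func main() {
	// Hypothetical error body as an HTTP endpoint might return it.
	body := []byte(`{"code": 5, "message": "namespace not found", "details": []}`)

	var st rpcStatus
	if err := json.Unmarshal(body, &st); err != nil {
		panic(err)
	}
	fmt.Printf("API error %d: %s\n", st.Code, st.Message)
}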
gen/openapiv2/headscale/v1/device.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/device.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
gen/openapiv2/headscale/v1/headscale.swagger.json (new file, 1071 lines; diff suppressed because it is too large)

gen/openapiv2/headscale/v1/machine.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/machine.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
Some files were not shown because too many files have changed in this diff.