Mirror of https://github.com/juanfont/headscale.git (synced 2025-12-17 13:43:32 +00:00)

Compare commits: v0.5.2 ... v0.13.0-be — 829 commits
(The 829 commit rows of the compare view were captured as bare abbreviated SHAs only; the author, date, and message columns were empty, so the table is omitted here.)
.dockerignore (new file, 18 lines)
@@ -0,0 +1,18 @@
// integration tests are not needed in docker
// ignoring it let us speed up the integration test
// development
integration_test.go
integration_test/

Dockerfile*
docker-compose*
.dockerignore
.goreleaser.yml
.git
.github
.gitignore
README.md
LICENSE
.vscode

*.sock
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
---
name: "Bug report"
about: "Create a bug report to help us improve"
title: ""
labels: ["bug"]
assignees: ""
---

**Bug description**

<!-- A clear and concise description of what the bug is. Describe the expected behavior
and how it is currently different. If you are unsure if it is a bug, consider discussing
it on our Discord server first. -->

**To Reproduce**

<!-- Steps to reproduce the behavior. -->

**Context info**

<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->
.github/ISSUE_TEMPLATE/config.yml (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
# Issues must have some content
blank_issues_enabled: false

# Contact links
contact_links:
  - name: "headscale usage documentation"
    url: "https://github.com/juanfont/headscale/blob/main/docs"
    about: "Find documentation about how to configure and run headscale."
  - name: "headscale Discord community"
    url: "https://discord.com/invite/XcQxk2VHjx"
    about: "Please ask and answer questions about usage of headscale here."
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
---
name: "Feature request"
about: "Suggest an idea for headscale"
title: ""
labels: ["enhancement"]
assignees: ""
---

**Feature request**

<!-- A clear and precise description of what new or changed feature you want. -->

<!-- Please include the reason, why you would need the feature. E.g. what problem
does it solve? Or which workflow is currently frustrating and will be improved by
this? -->
.github/ISSUE_TEMPLATE/other_issue.md (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
---
name: "Other issue"
about: "Report a different issue"
title: ""
labels: ["bug"]
assignees: ""
---

<!-- If you have a question, please consider using our Discord for asking questions -->

**Issue description**

<!-- Please add your issue description. -->

**To Reproduce**

<!-- Steps to reproduce the behavior. -->

**Context info**

<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->
.github/pull_request_template.md (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
<!-- Please tick if the following things apply. You… -->

- [] read the [CONTRIBUTING guidelines](README.md#user-content-contributing)
- [] raised a GitHub issue or discussed it on the projects chat beforehand
- [] added unit tests
- [] added integration tests
- [] updated documentation if needed
- [] updated CHANGELOG.md

<!-- If applicable, please reference the issue using `Fixes #XXX` and add tests to cover your new code. -->
.github/workflows/build.yml (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
name: Build

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - name: Setup Go
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/setup-go@v2
        with:
          go-version: "1.17"

      - name: Install dependencies
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          go version
          sudo apt update
          sudo apt install -y make

      - name: Run build
        if: steps.changed-files.outputs.any_changed == 'true'
        run: make build

      - uses: actions/upload-artifact@v2
        if: steps.changed-files.outputs.any_changed == 'true'
        with:
          name: headscale-linux
          path: headscale
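The build job above only runs when Go sources, go.mod/go.sum, integration_test/ or config-example.yaml change, and then simply calls `make build`. A rough local equivalent, assuming Go 1.17+ and make are installed (a sketch, not part of the workflow):

```shell
go version
make build            # compiles cmd/headscale/headscale.go into the ./headscale binary
ls -lh ./headscale    # this is the file the workflow uploads as the "headscale-linux" artifact
```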
.github/workflows/contributors.yml (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
name: Contributors

on:
  push:
    branches:
      - main

jobs:
  add-contributors:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: BobAnkh/add-contributors@master
        with:
          CONTRIBUTOR: "## Contributors"
          COLUMN_PER_ROW: "6"
          ACCESS_TOKEN: ${{secrets.GITHUB_TOKEN}}
          IMG_WIDTH: "100"
          FONT_SIZE: "14"
          PATH: "/README.md"
          COMMIT_MESSAGE: "docs(README): update contributors"
          AVATAR_SHAPE: "round"
          BRANCH: "update-contributors"
          PULL_REQUEST: "main"
.github/workflows/lint.yml (vendored, new file, 74 lines)
@@ -0,0 +1,74 @@
---
name: CI

on: [push, pull_request]

jobs:
  golangci-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - name: golangci-lint
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: golangci/golangci-lint-action@v2
        with:
          version: latest

          # Only block PRs on new problems.
          # If this is not enabled, we will end up having PRs
          # blocked because new linters has appared and other
          # parts of the code is affected.
          only-new-issues: true

  prettier-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            **/*.md
            **/*.yml
            **/*.yaml
            **/*.ts
            **/*.js
            **/*.sass
            **/*.css
            **/*.scss
            **/*.html

      - name: Prettify code
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: creyD/prettier_action@v4.0
        with:
          prettier_options: >-
            --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
          only_changed: false
          dry: true

  proto-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: bufbuild/buf-setup-action@v0.7.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: "proto"
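The three lint jobs above can be reproduced locally; a hedged sketch, assuming golangci-lint, prettier and buf are installed (CI pins its own versions):

```shell
golangci-lint run                                               # Go linting, as in the golangci-lint job
prettier --check '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'  # same glob the workflow passes via prettier_options
cd proto && buf lint                                            # proto linting, matching the buf-lint-action input "proto"
```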
.github/workflows/release.yml (vendored, 213 changed lines)
@@ -1,30 +1,223 @@
name: goreleaser
---
name: release

on:
  push:
    tags:
      - "*" # triggers only if push new tag version
  workflow_dispatch:

jobs:
  goreleaser:
    runs-on: ubuntu-18.04 # due to CGO we need to user an older version
    runs-on: ubuntu-18.04 # due to CGO we need to user an older version
    steps:
      -
        name: Checkout
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      -
        name: Set up Go
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16
      -
        name: Run GoReleaser
          go-version: 1.17

      - name: Install dependencies
        run: |
          sudo apt update
          sudo apt install -y gcc-aarch64-linux-gnu
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          distribution: goreleaser
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  docker-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest
            type=sha
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache-new
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache
          mv /tmp/.buildx-cache-new /tmp/.buildx-cache

  docker-debug-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache-debug
          key: ${{ runner.os }}-buildx-debug-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-debug-
      - name: Docker meta
        id: meta-debug
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          flavor: |
            latest=false
          tags: |
            type=semver,pattern={{version}}-debug
            type=semver,pattern={{major}}.{{minor}}-debug
            type=semver,pattern={{major}}-debug
            type=raw,value=latest-debug
            type=sha,suffix=-debug
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          file: Dockerfile.debug
          tags: ${{ steps.meta-debug.outputs.tags }}
          labels: ${{ steps.meta-debug.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache-debug
          cache-to: type=local,dest=/tmp/.buildx-cache-debug-new
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache-debug
          mv /tmp/.buildx-cache-debug-new /tmp/.buildx-cache-debug

  docker-alpine-release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up QEMU for multiple platforms
        uses: docker/setup-qemu-action@master
        with:
          platforms: arm64,amd64
      - name: Cache Docker layers
        uses: actions/cache@v2
        with:
          path: /tmp/.buildx-cache-alpine
          key: ${{ runner.os }}-buildx-alpine-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-alpine-
      - name: Docker meta
        id: meta-alpine
        uses: docker/metadata-action@v3
        with:
          # list of Docker images to use as base name for tags
          images: |
            ${{ secrets.DOCKERHUB_USERNAME }}/headscale
            ghcr.io/${{ github.repository_owner }}/headscale
          flavor: |
            latest=false
          tags: |
            type=semver,pattern={{version}}-alpine
            type=semver,pattern={{major}}.{{minor}}-alpine
            type=semver,pattern={{major}}-alpine
            type=raw,value=latest-alpine
            type=sha,suffix=-alpine
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Login to GHCR
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          push: true
          context: .
          file: Dockerfile.alpine
          tags: ${{ steps.meta-alpine.outputs.tags }}
          labels: ${{ steps.meta-alpine.outputs.labels }}
          platforms: linux/amd64,linux/arm64
          cache-from: type=local,src=/tmp/.buildx-cache-alpine
          cache-to: type=local,dest=/tmp/.buildx-cache-alpine-new
      - name: Prepare cache for next build
        run: |
          rm -rf /tmp/.buildx-cache-alpine
          mv /tmp/.buildx-cache-alpine-new /tmp/.buildx-cache-alpine
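All three docker-* jobs above follow the same pattern: set up Buildx and QEMU, derive tags with docker/metadata-action, then build and push for linux/amd64 and linux/arm64. A rough local equivalent for the alpine flavour (the image name and tag are placeholders, not values from the workflow):

```shell
docker buildx create --use        # one-off: a builder that can target both platforms via QEMU
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --file Dockerfile.alpine \
  --tag registry.example.org/you/headscale:latest-alpine \
  --push .
```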
.github/workflows/test-integration.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
name: CI

on: [pull_request]

jobs:
  integration-test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - name: Setup Go
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/setup-go@v2
        with:
          go-version: "1.17"

      - name: Run Integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: go test -tags integration -timeout 30m
.github/workflows/test.yml (vendored, 62 changed lines)
@@ -3,43 +3,41 @@ name: CI
on: [push, pull_request]

jobs:
  # The "build" workflow
  test:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v2
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      # Install and run golangci-lint as a separate step, it's much faster this
      # way because this action has caching. It'll get run again in `make lint`
      # below, but it's still much faster in the end than installing
      # golangci-lint manually in the `Run lint` step.
      - uses: golangci/golangci-lint-action@v2
        with:
          args: --timeout 2m

      # Setup Go
      - name: Setup Go
        uses: actions/setup-go@v2
        with:
          go-version: '1.16.3' # The Go version to download (if necessary) and use.
      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v14.1
        with:
          files: |
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      # Install all the dependencies
      - name: Install dependencies
        run: |
          go version
          go install golang.org/x/lint/golint@latest
          sudo apt update
          sudo apt install -y make

      - name: Run tests
        run: make test
      - name: Setup Go
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/setup-go@v2
        with:
          go-version: "1.17"

      - name: Run lint
        run: make lint
      - name: Install dependencies
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          go version
          sudo apt update
          sudo apt install -y make

      - name: Run build
        run: make
      - name: Run tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: make test

      - name: Run build
        if: steps.changed-files.outputs.any_changed == 'true'
        run: make
.gitignore (vendored, 9 changed lines)
@@ -16,5 +16,14 @@

/headscale
config.json
config.yaml
derp.yaml
*.hujson
*.key
/db.sqlite
*.sqlite3

# Exclude Jetbrains Editors
.idea

test_output/
.golangci.yaml (new file, 56 lines)
@@ -0,0 +1,56 @@
---
run:
  timeout: 10m

issues:
  skip-dirs:
    - gen
linters:
  enable-all: true
  disable:
    - exhaustivestruct
    - revive
    - lll
    - interfacer
    - scopelint
    - maligned
    - golint
    - gofmt
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - funlen
    - exhaustivestruct
    - tagliatelle
    - godox
    - ireturn

    # We should strive to enable these:
    - wrapcheck
    - dupl
    - makezero

    # We might want to enable this, but it might be a lot of work
    - cyclop
    - nestif
    - wsl # might be incompatible with gofumpt
    - testpackage
    - paralleltest

linters-settings:
  varnamelen:
    ignore-type-assert-ok: true
    ignore-map-index-ok: true
    ignore-names:
      - err
      - db
      - id
      - ip
      - ok
      - c

  gocritic:
    disabled-checks:
      - appendAssign
      # TODO(kradalby): Remove this
      - ifElseChain
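With this file at the repository root, golangci-lint picks the configuration up automatically; the Makefile further down invokes it as in this one-line sketch (assuming golangci-lint is installed):

```shell
golangci-lint run --fix --timeout 10m   # same invocation as the Makefile's `lint` target
```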
.goreleaser.yml (changed)
@@ -1,12 +1,15 @@
# This is an example .goreleaser.yml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
---
before:
  hooks:
    - go mod tidy
    - go mod tidy -compat=1.17

release:
  prerelease: auto

builds:
  - id: darwin-amd64
    main: ./cmd/headscale/headscale.go
    mod_timestamp: '{{ .CommitTimestamp }}'
    mod_timestamp: "{{ .CommitTimestamp }}"
    goos:
      - darwin
    goarch:
@@ -19,16 +22,17 @@ builds:
    flags:
      - -mod=readonly
    ldflags:
      - -s -w -X main.version={{.Version}}
      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

  - id: linux-armhf
    main: ./cmd/headscale/headscale.go
    mod_timestamp: '{{ .CommitTimestamp }}'
    mod_timestamp: "{{ .CommitTimestamp }}"
    goos:
      - linux
    goarch:
      - arm
    goarm:
      - 7
      - "7"
    env:
      - CC=arm-linux-gnueabihf-gcc
      - CXX=arm-linux-gnueabihf-g++
@@ -39,8 +43,7 @@ builds:
    flags:
      - -mod=readonly
    ldflags:
      - -s -w -X main.version={{.Version}}

      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

  - id: linux-amd64
    env:
@@ -49,11 +52,23 @@ builds:
      - linux
    goarch:
      - amd64
    goarm:
      - 6
      - 7
    main: ./cmd/headscale/headscale.go
    mod_timestamp: '{{ .CommitTimestamp }}'
    mod_timestamp: "{{ .CommitTimestamp }}"
    ldflags:
      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

  - id: linux-arm64
    goos:
      - linux
    goarch:
      - arm64
    env:
      - CGO_ENABLED=1
      - CC=aarch64-linux-gnu-gcc
    main: ./cmd/headscale/headscale.go
    mod_timestamp: "{{ .CommitTimestamp }}"
    ldflags:
      - -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}

archives:
  - id: golang-cross
@@ -61,16 +76,17 @@ archives:
      - darwin-amd64
      - linux-armhf
      - linux-amd64
      - linux-arm64
    name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
    format: binary

checksum:
  name_template: 'checksums.txt'
  name_template: "checksums.txt"
snapshot:
  name_template: "{{ .Tag }}-next"
changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'
      - "^docs:"
      - "^test:"
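Releases are built by the goreleaser job in release.yml above; because the builds use CGO, the cross toolchains named in the `env` blocks must be present. A hedged sketch of a local dry run that publishes nothing:

```shell
sudo apt install -y gcc-aarch64-linux-gnu gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf
goreleaser release --rm-dist --snapshot   # builds all configured targets into dist/ without tagging or uploading
```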
CHANGELOG.md (new file, 68 lines; the capture ends partway through the 0.11.0 entry)
@@ -0,0 +1,68 @@
# CHANGELOG

**TBD (TBD):**

**0.13.0 (2022-xx-xx):**

**Features**:

- Add IPv6 support to the prefix assigned to namespaces

**Changes**:

- `ip_prefix` is now superseded by `ip_prefixes` in the configuration [#208](https://github.com/juanfont/headscale/pull/208)

**0.12.4 (2022-01-29):**

**Changes**:

- Make gRPC Unix Socket permissions configurable [#292](https://github.com/juanfont/headscale/pull/292)
- Trim whitespace before reading Private Key from file [#289](https://github.com/juanfont/headscale/pull/289)
- Add new command to generate a private key for `headscale` [#290](https://github.com/juanfont/headscale/pull/290)
- Fixed issue where hosts deleted from control server may be written back to the database, as long as they are connected to the control server [#278](https://github.com/juanfont/headscale/pull/278)

**0.12.3 (2022-01-13):**

**Changes**:

- Added Alpine container [#270](https://github.com/juanfont/headscale/pull/270)
- Minor updates in dependencies [#271](https://github.com/juanfont/headscale/pull/271)

**0.12.2 (2022-01-11):**

Happy New Year!

**Changes**:

- Fix Docker release [#258](https://github.com/juanfont/headscale/pull/258)
- Rewrite main docs [#262](https://github.com/juanfont/headscale/pull/262)
- Improve Docker docs [#263](https://github.com/juanfont/headscale/pull/263)

**0.12.1 (2021-12-24):**

(We are skipping 0.12.0 to correct a mishap done weeks ago with the version tagging)

**BREAKING**:

- Upgrade to Tailscale 1.18 [#229](https://github.com/juanfont/headscale/pull/229)
  - This change requires a new format for private key, private keys are now generated automatically:
    1. Delete your current key
    2. Restart `headscale`, a new key will be generated.
    3. Restart all Tailscale clients to fetch the new key

**Changes**:

- Unify configuration example [#197](https://github.com/juanfont/headscale/pull/197)
- Add stricter linting and formatting [#223](https://github.com/juanfont/headscale/pull/223)

**Features**:

- Add gRPC and HTTP API (HTTP API is currently disabled) [#204](https://github.com/juanfont/headscale/pull/204)
- Use gRPC between the CLI and the server [#206](https://github.com/juanfont/headscale/pull/206), [#212](https://github.com/juanfont/headscale/pull/212)
- Beta OpenID Connect support [#126](https://github.com/juanfont/headscale/pull/126), [#227](https://github.com/juanfont/headscale/pull/227)

**0.11.0 (2021-10-25):**

**BREAKING**:

- Make headscale fetch DERP map from URL and file [#196](https://github.com/juanfont/headscale/pull/196)
Dockerfile (20 changed lines)
@@ -1,12 +1,22 @@
FROM golang:latest AS build
# Builder image
FROM docker.io/golang:1.17.1-bullseye AS build
ENV GOPATH /go
COPY . /go/src/headscale
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN go mod download

COPY . .

RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

FROM scratch
COPY --from=build /go/bin/headscale /go/bin/headscale
# Production image
FROM gcr.io/distroless/base-debian11

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
ENTRYPOINT ["/go/bin/headscale"]
CMD ["headscale"]
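The resulting image holds only the statically linked headscale binary on a distroless base and exposes 8080/tcp. A hedged example of building and running it; the image name, config mount and `headscale serve` invocation are illustrative (the config lookup path follows the README's `/etc/headscale` convention):

```shell
docker build -t headscale:dev .
docker run -d --name headscale \
  -p 8080:8080 \
  -v "$(pwd)/config":/etc/headscale \
  headscale:dev headscale serve
```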
Dockerfile.alpine (new file, 23 lines)
@@ -0,0 +1,23 @@
# Builder image
FROM docker.io/golang:1.17.1-alpine AS build
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN apk add gcc musl-dev
RUN go mod download

COPY . .

RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

# Production image
FROM docker.io/alpine:latest

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
CMD ["headscale"]
Dockerfile.debug (new file, 23 lines)
@@ -0,0 +1,23 @@
# Builder image
FROM docker.io/golang:1.17.1-bullseye AS build
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN go mod download

COPY . .

RUN go install -a -ldflags="-extldflags=-static" -tags netgo,sqlite_omit_load_extension ./cmd/headscale
RUN test -e /go/bin/headscale

# Debug image
FROM gcr.io/distroless/base-debian11:debug

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

# Need to reset the entrypoint or everything will run as a busybox script
ENTRYPOINT []
EXPOSE 8080/tcp
CMD ["headscale"]
Dockerfile.tailscale (new file, 11 lines)
@@ -0,0 +1,11 @@
FROM ubuntu:latest

ARG TAILSCALE_VERSION

RUN apt-get update \
    && apt-get install -y gnupg curl \
    && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
    && curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
    && apt-get update \
    && apt-get install -y tailscale=${TAILSCALE_VERSION} dnsutils \
    && rm -rf /var/lib/apt/lists/*
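This image installs a specific Tailscale client version chosen at build time via the TAILSCALE_VERSION build argument. A hedged build example — the version number is purely illustrative:

```shell
docker build -f Dockerfile.tailscale \
  --build-arg TAILSCALE_VERSION=1.20.4 \
  -t tailscale-client:1.20.4 .
```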
Makefile (37 changed lines)
@@ -1,14 +1,28 @@
# Calculate version
version = $(shell ./scripts/version-at-commit.sh)

rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))

# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)
PROTO_SOURCES = $(call rwildcard,,*.proto)


build:
	go build -ldflags "-s -w -X main.version=$(version)" cmd/headscale/headscale.go
	go build -ldflags "-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$(version)" cmd/headscale/headscale.go

dev: lint test build

test:
	@go test -coverprofile=coverage.out ./...

test_integration:
	go test -tags integration -timeout 30m -count=1 ./...

test_integration_cli:
	go test -tags integration -v integration_cli_test.go integration_common_test.go

coverprofile_func:
	go tool cover -func=coverage.out

@@ -16,9 +30,26 @@ coverprofile_html:
	go tool cover -html=coverage.out

lint:
	golint
	golangci-lint run
	golangci-lint run --fix --timeout 10m

fmt:
	prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
	golines --max-len=88 --base-formatter=gofumpt -w $(GO_SOURCES)
	clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i $(PROTO_SOURCES)

proto-lint:
	cd proto/ && buf lint

compress: build
	upx --brute headscale

generate:
	rm -rf gen
	buf generate proto

install-protobuf-plugins:
	go install \
		github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway \
		github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 \
		google.golang.org/protobuf/cmd/protoc-gen-go \
		google.golang.org/grpc/cmd/protoc-gen-go-grpc
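The Makefile bundles the day-to-day developer tasks; a short usage sketch (it assumes Go plus the tools each target calls — golangci-lint, prettier, golines/gofumpt, clang-format and buf — are installed):

```shell
make build                      # go build with the version injected via -ldflags
make test                       # unit tests with a coverage profile
make test_integration           # integration-tagged tests, 30m timeout, -count=1
make lint fmt                   # golangci-lint --fix, then prettier/golines/clang-format
make install-protobuf-plugins   # one-off install of the protoc-gen-* tools
make generate                   # regenerate gen/ from proto/ with buf
```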
416
README.md
416
README.md
@@ -1,9 +1,13 @@
|
||||
# Headscale
|
||||
# headscale
|
||||
|
||||
[](https://gitter.im/headscale-dev/community?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 
|
||||

|
||||
|
||||
An open source, self-hosted implementation of the Tailscale coordination server.
|
||||
|
||||
Join our [Discord](https://discord.gg/XcQxk2VHjx) server for a chat.
|
||||
|
||||
**Note:** Always select the same GitHub tag as the released version you use to ensure you have the correct example configuration and documentation. The `main` branch might contain unreleased changes.
|
||||
|
||||
## Overview
|
||||
|
||||
Tailscale is [a modern VPN](https://tailscale.com/) built on top of [Wireguard](https://www.wireguard.com/). It [works like an overlay network](https://tailscale.com/blog/how-tailscale-works/) between the computers of your networks - using all kinds of [NAT traversal sorcery](https://tailscale.com/blog/how-nat-traversal-works/).
|
||||
@@ -12,195 +16,265 @@ Everything in Tailscale is Open Source, except the GUI clients for proprietary O
|
||||
|
||||
The control server works as an exchange point of Wireguard public keys for the nodes in the Tailscale network. It also assigns the IP addresses of the clients, creates the boundaries between each user, enables sharing machines between users, and exposes the advertised routes of your nodes.
|
||||
|
||||
Headscale implements this coordination server.
|
||||
headscale implements this coordination server.
|
||||
|
||||
## Status
|
||||
|
||||
- [x] Base functionality (nodes can communicate with each other)
|
||||
- [x] Node registration through the web flow
|
||||
- [x] Network changes are relied to the nodes
|
||||
- [x] Namespace support (~equivalent to multi-user in Tailscale.com)
|
||||
- [x] Network changes are relayed to the nodes
|
||||
- [x] Namespaces support (~tailnets in Tailscale.com naming)
|
||||
- [x] Routing (advertise & accept, including exit nodes)
|
||||
- [x] Node registration via pre-auth keys (including reusable keys, and ephemeral node support)
|
||||
- [X] JSON-formatted output
|
||||
- [X] ACLs
|
||||
- [X] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
|
||||
- [ ] Share nodes between ~~users~~ namespaces
|
||||
- [ ] DNS
|
||||
- [x] JSON-formatted output
|
||||
- [x] ACLs
|
||||
- [x] Taildrop (File Sharing)
|
||||
- [x] Support for alternative IP ranges in the tailnets (default Tailscale's 100.64.0.0/10)
|
||||
- [x] DNS (passing DNS servers to nodes)
|
||||
- [x] Single-Sign-On (via Open ID Connect)
|
||||
- [x] Share nodes between namespaces
|
||||
- [x] MagicDNS (see `docs/`)
|
||||
|
||||
## Client OS support
|
||||
|
||||
| OS | Supports headscale |
|
||||
| ------- | ----------------------------------------------------------------------------------------------------------------- |
|
||||
| Linux | Yes |
|
||||
| OpenBSD | Yes |
|
||||
| macOS | Yes (see `/apple` on your headscale for more information) |
|
||||
| Windows | Yes |
|
||||
| Android | [You need to compile the client yourself](https://github.com/juanfont/headscale/issues/58#issuecomment-885255270) |
|
||||
| iOS | Not yet |
|
||||
|
||||
## Roadmap 🤷
|
||||
|
||||
We are now focusing on adding integration tests with the official clients.
|
||||
|
||||
Suggestions/PRs welcomed!
|
||||
|
||||
## Running headscale
|
||||
|
||||
|
||||
## Running it
|
||||
|
||||
1. Download the Headscale binary https://github.com/juanfont/headscale/releases, and place it somewhere in your PATH
|
||||
|
||||
|
||||
2. (Optional, you can also use SQLite) Get yourself a PostgreSQL DB running
|
||||
|
||||
```shell
|
||||
docker run --name headscale -e POSTGRES_DB=headscale -e \
|
||||
POSTGRES_USER=foo -e POSTGRES_PASSWORD=bar -p 5432:5432 -d postgres
|
||||
```
|
||||
|
||||
3. Set some stuff up (headscale Wireguard keys & the config.json file)
|
||||
```shell
|
||||
wg genkey > private.key
|
||||
wg pubkey < private.key > public.key # not needed
|
||||
|
||||
# Postgres
|
||||
cp config.json.postgres.example config.json
|
||||
# or
|
||||
# SQLite
|
||||
cp config.json.sqlite.example config.json
|
||||
```
|
||||
|
||||
4. Create a namespace (a namespace is a 'tailnet', a group of Tailscale nodes that can talk to each other)
|
||||
```shell
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
5. Run the server
|
||||
```shell
|
||||
headscale serve
|
||||
```
|
||||
|
||||
6. If you used tailscale.com before in your nodes, make sure you clear the tailscaled data folder
|
||||
```shell
|
||||
systemctl stop tailscaled
|
||||
rm -fr /var/lib/tailscale
|
||||
systemctl start tailscaled
|
||||
```
|
||||
|
||||
7. Add your first machine
|
||||
```shell
|
||||
tailscale up -login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
8. Navigate to the URL you will get with `tailscale up`, where you'll find your machine key.
|
||||
|
||||
9. In the server, register your machine to a namespace with the CLI
|
||||
```shell
|
||||
headscale -n myfirstnamespace node register YOURMACHINEKEY
|
||||
```
|
||||
|
||||
Alternatively, you can use Auth Keys to register your machines:
|
||||
|
||||
1. Create an authkey
|
||||
```shell
|
||||
headscale -n myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
2. Use the authkey from your machine to register it
|
||||
```shell
|
||||
tailscale up -login-server YOUR_HEADSCALE_URL --authkey YOURAUTHKEY
|
||||
```
|
||||
|
||||
If you create an authkey with the `--ephemeral` flag, that key will create ephemeral nodes. This implies that `--reusable` is true.
|
||||
|
||||
Please bear in mind that all the commands from headscale support adding `-o json` or `-o json-line` to get a nicely JSON-formatted output.
|
||||
|
||||
|
||||
## Configuration reference
|
||||
|
||||
Headscale's configuration file is named `config.json` or `config.yaml`. Headscale will look for it in `/etc/headscale`, `~/.headscale` and finally the directory from where the Headscale binary is executed.
|
||||
|
||||
```
|
||||
"server_url": "http://192.168.1.12:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"ip_prefix": "100.64.0.0/10"
|
||||
```
|
||||
|
||||
`server_url` is the external URL via which Headscale is reachable. `listen_addr` is the IP address and port the Headscale program should listen on. `ip_prefix` is the IP prefix (range) in which IP addresses for nodes will be allocated (default 100.64.0.0/10, e.g., 192.168.4.0/24, 10.0.0.0/8)
|
||||
|
||||
```
|
||||
"log_level": "debug"
|
||||
```
|
||||
`log_level` can be used to set the Log level for Headscale, it defaults to `debug`, and the available levels are: `trace`, `debug`, `info`, `warn` and `error`.
|
||||
|
||||
```
|
||||
"private_key_path": "private.key",
|
||||
```
|
||||
|
||||
`private_key_path` is the path to the Wireguard private key. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```
|
||||
"derp_map_path": "derp.yaml",
|
||||
```
|
||||
|
||||
`derp_map_path` is the path to the [DERP](https://pkg.go.dev/tailscale.com/derp) map file. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
```
|
||||
|
||||
`ephemeral_node_inactivity_timeout` is the timeout after which inactive ephemeral node records will be deleted from the database. The default is 30 minutes. This value must be higher than 65 seconds (the keepalive timeout for the HTTP long poll is 60 seconds, plus a few seconds to avoid race conditions).
|
||||
|
||||
```
|
||||
"db_host": "localhost",
|
||||
"db_port": 5432,
|
||||
"db_name": "headscale",
|
||||
"db_user": "foo",
|
||||
"db_pass": "bar",
|
||||
```
|
||||
|
||||
The fields starting with `db_` are used for the PostgreSQL connection information.
|
||||
|
||||
|
||||
### Running the service via TLS (optional)
|
||||
|
||||
```
|
||||
"tls_cert_path": ""
|
||||
"tls_key_path": ""
|
||||
```
|
||||
|
||||
Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_cert_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```
"tls_letsencrypt_hostname": "",
"tls_letsencrypt_listen": ":http",
"tls_letsencrypt_cache_dir": ".cache",
"tls_letsencrypt_challenge_type": "HTTP-01",
```

To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) Headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.

#### Challenge type HTTP-01

The default challenge type `HTTP-01` requires that Headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, Headscale listens on port 80 on all local IPs for the Let's Encrypt automated validation.

If you need to change the IP and/or port used by Headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running Headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.

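As a sketch of such a rule (assuming `tls_letsencrypt_listen` has been set to `:9080`; the port number is only an example), the incoming validation traffic can be redirected with iptables:

```shell
# Redirect incoming port 80 to the port Headscale uses for the HTTP-01 validation
iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 9080
```
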
#### Challenge type TLS-ALPN-01

Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, Headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.

### Policy ACLs

Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.

For instance, instead of referring to users when defining groups, you must use namespaces (which are the equivalent of users/logins in Tailscale.com).

Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.

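As a minimal sketch of what such a policy can look like (the namespace name `mynamespace` and the host entry below are made up for illustration; the file uses the same HuJSON format as the examples in `./tests/acls/`):

```
{
  // Groups are made up of namespaces rather than users
  "Groups": {
    "group:admins": ["mynamespace"]
  },
  // Hosts are aliases for IP addresses or subnets
  "Hosts": {
    "example-host-1": "100.100.100.100"
  },
  "ACLs": [
    {
      "Action": "accept",
      "Users": ["group:admins"],
      "Ports": ["example-host-1:22,80,443"]
    }
  ]
}
```
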
Please have a look at the documentation under [`docs/`](docs/).

## Disclaimer

1. We have nothing to do with Tailscale, or Tailscale Inc.
2. The purpose of writing this was to learn how Tailscale works.
3. The purpose of Headscale is maintaining a working, self-hosted Tailscale control panel.

## More on Tailscale

- https://tailscale.com/blog/how-tailscale-works/
- https://tailscale.com/blog/tailscale-key-management/
- https://tailscale.com/blog/an-unlikely-database-migration/

## Contributing

To contribute to Headscale you will need the latest version of [Go](https://golang.org) and [Buf](https://buf.build) (Protobuf generator).

### Code style

To ensure we have some consistency with a growing number of contributions, this project has adopted linting and style/formatting rules:

The **Go** code is linted with [`golangci-lint`](https://golangci-lint.run) and formatted with [`golines`](https://github.com/segmentio/golines) (width 88) and [`gofumpt`](https://github.com/mvdan/gofumpt). Please configure your editor to run the tools while developing and make sure to run `make lint` and `make fmt` before committing any code.

The **Proto** code is linted with [`buf`](https://docs.buf.build/lint/overview) and formatted with [`clang-format`](https://clang.llvm.org/docs/ClangFormat.html).

The **rest** (Markdown, YAML, etc.) is formatted with [`prettier`](https://prettier.io).

Check out the `.golangci.yaml` and `Makefile` to see the specific configuration.

### Install development tools

- Go
- Buf
- Protobuf tools:

```shell
make install-protobuf-plugins
```

### Testing and building

Some parts of the project require Go code to be generated from Protobuf; if changes are made in `proto/`, it must be (re-)generated with:

```shell
make generate
```

**Note**: Please check in changes from `gen/` in a separate commit to make it easier to review.

To run the tests:

```shell
make test
```

To build the program:

```shell
make build
```

## Contributors

<table>
<tr>
<td align="center"><a href="https://github.com/juanfont"><img src="https://avatars.githubusercontent.com/u/181059?v=4" width="100" style="border-radius:50%" alt="Juan Font"/><br /><sub><b>Juan Font</b></sub></a></td>
<td align="center"><a href="https://github.com/kradalby"><img src="https://avatars.githubusercontent.com/u/98431?v=4" width="100" style="border-radius:50%" alt="Kristoffer Dalby"/><br /><sub><b>Kristoffer Dalby</b></sub></a></td>
<td align="center"><a href="https://github.com/cure"><img src="https://avatars.githubusercontent.com/u/149135?v=4" width="100" style="border-radius:50%" alt="Ward Vandewege"/><br /><sub><b>Ward Vandewege</b></sub></a></td>
<td align="center"><a href="https://github.com/ohdearaugustin"><img src="https://avatars.githubusercontent.com/u/14001491?v=4" width="100" style="border-radius:50%" alt="ohdearaugustin"/><br /><sub><b>ohdearaugustin</b></sub></a></td>
<td align="center"><a href="https://github.com/unreality"><img src="https://avatars.githubusercontent.com/u/352522?v=4" width="100" style="border-radius:50%" alt="unreality"/><br /><sub><b>unreality</b></sub></a></td>
<td align="center"><a href="https://github.com/qbit"><img src="https://avatars.githubusercontent.com/u/68368?v=4" width="100" style="border-radius:50%" alt="Aaron Bieber"/><br /><sub><b>Aaron Bieber</b></sub></a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/ptman"><img src="https://avatars.githubusercontent.com/u/24669?v=4" width="100" style="border-radius:50%" alt="Paul Tötterman"/><br /><sub><b>Paul Tötterman</b></sub></a></td>
<td align="center"><a href="https://github.com/cmars"><img src="https://avatars.githubusercontent.com/u/23741?v=4" width="100" style="border-radius:50%" alt="Casey Marshall"/><br /><sub><b>Casey Marshall</b></sub></a></td>
<td align="center"><a href="https://github.com/SilverBut"><img src="https://avatars.githubusercontent.com/u/6560655?v=4" width="100" style="border-radius:50%" alt="Silver Bullet"/><br /><sub><b>Silver Bullet</b></sub></a></td>
<td align="center"><a href="https://github.com/t56k"><img src="https://avatars.githubusercontent.com/u/12165422?v=4" width="100" style="border-radius:50%" alt="thomas"/><br /><sub><b>thomas</b></sub></a></td>
<td align="center"><a href="https://github.com/awoimbee"><img src="https://avatars.githubusercontent.com/u/22431493?v=4" width="100" style="border-radius:50%" alt="Arthur Woimbée"/><br /><sub><b>Arthur Woimbée</b></sub></a></td>
<td align="center"><a href="https://github.com/fkr"><img src="https://avatars.githubusercontent.com/u/51063?v=4" width="100" style="border-radius:50%" alt="Felix Kronlage-Dammers"/><br /><sub><b>Felix Kronlage-Dammers</b></sub></a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/felixonmars"><img src="https://avatars.githubusercontent.com/u/1006477?v=4" width="100" style="border-radius:50%" alt="Felix Yan"/><br /><sub><b>Felix Yan</b></sub></a></td>
<td align="center"><a href="https://github.com/shaananc"><img src="https://avatars.githubusercontent.com/u/2287839?v=4" width="100" style="border-radius:50%" alt="Shaanan Cohney"/><br /><sub><b>Shaanan Cohney</b></sub></a></td>
<td align="center"><a href="https://github.com/Teteros"><img src="https://avatars.githubusercontent.com/u/5067989?v=4" width="100" style="border-radius:50%" alt="Teteros"/><br /><sub><b>Teteros</b></sub></a></td>
<td align="center"><a href="https://github.com/gitter-badger"><img src="https://avatars.githubusercontent.com/u/8518239?v=4" width="100" style="border-radius:50%" alt="The Gitter Badger"/><br /><sub><b>The Gitter Badger</b></sub></a></td>
<td align="center"><a href="https://github.com/tianon"><img src="https://avatars.githubusercontent.com/u/161631?v=4" width="100" style="border-radius:50%" alt="Tianon Gravi"/><br /><sub><b>Tianon Gravi</b></sub></a></td>
<td align="center"><a href="https://github.com/woudsma"><img src="https://avatars.githubusercontent.com/u/6162978?v=4" width="100" style="border-radius:50%" alt="Tjerk Woudsma"/><br /><sub><b>Tjerk Woudsma</b></sub></a></td>
</tr>
<tr>
<td align="center"><a href="https://github.com/zekker6"><img src="https://avatars.githubusercontent.com/u/1367798?v=4" width="100" style="border-radius:50%" alt="Zakhar Bessarab"/><br /><sub><b>Zakhar Bessarab</b></sub></a></td>
<td align="center"><a href="https://github.com/derelm"><img src="https://avatars.githubusercontent.com/u/465155?v=4" width="100" style="border-radius:50%" alt="derelm"/><br /><sub><b>derelm</b></sub></a></td>
<td align="center"><a href="https://github.com/ignoramous"><img src="https://avatars.githubusercontent.com/u/852289?v=4" width="100" style="border-radius:50%" alt="ignoramous"/><br /><sub><b>ignoramous</b></sub></a></td>
<td align="center"><a href="https://github.com/xpzouying"><img src="https://avatars.githubusercontent.com/u/3946563?v=4" width="100" style="border-radius:50%" alt="zy"/><br /><sub><b>zy</b></sub></a></td>
</tr>
</table>

200
acls.go
200
acls.go
@@ -9,22 +9,39 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const errorEmptyPolicy = Error("empty policy")
|
||||
const errorInvalidAction = Error("invalid action")
|
||||
const errorInvalidUserSection = Error("invalid user section")
|
||||
const errorInvalidGroup = Error("invalid group")
|
||||
const errorInvalidTag = Error("invalid tag")
|
||||
const errorInvalidNamespace = Error("invalid namespace")
|
||||
const errorInvalidPortFormat = Error("invalid port format")
|
||||
const (
|
||||
errEmptyPolicy = Error("empty policy")
|
||||
errInvalidAction = Error("invalid action")
|
||||
errInvalidUserSection = Error("invalid user section")
|
||||
errInvalidGroup = Error("invalid group")
|
||||
errInvalidTag = Error("invalid tag")
|
||||
errInvalidNamespace = Error("invalid namespace")
|
||||
errInvalidPortFormat = Error("invalid port format")
|
||||
)
|
||||
|
||||
// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules
|
||||
const (
|
||||
Base8 = 8
|
||||
Base10 = 10
|
||||
BitSize16 = 16
|
||||
BitSize32 = 32
|
||||
BitSize64 = 64
|
||||
portRangeBegin = 0
|
||||
portRangeEnd = 65535
|
||||
expectedTokenItems = 2
|
||||
)
|
||||
|
||||
// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules.
|
||||
func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
log.Debug().
|
||||
Str("func", "LoadACLPolicy").
|
||||
Str("path", path).
|
||||
Msg("Loading ACL policy from path")
|
||||
|
||||
policyFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -32,16 +49,23 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
defer policyFile.Close()
|
||||
|
||||
var policy ACLPolicy
|
||||
b, err := io.ReadAll(policyFile)
|
||||
policyBytes, err := io.ReadAll(policyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = hujson.Unmarshal(b, &policy)
|
||||
|
||||
ast, err := hujson.Parse(policyBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ast.Standardize()
|
||||
policyBytes = ast.Pack()
|
||||
err = json.Unmarshal(policyBytes, &policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if policy.IsZero() {
|
||||
return errorEmptyPolicy
|
||||
return errEmptyPolicy
|
||||
}
|
||||
|
||||
h.aclPolicy = &policy
|
||||
@@ -50,40 +74,45 @@ func (h *Headscale) LoadACLPolicy(path string) error {
|
||||
return err
|
||||
}
|
||||
h.aclRules = rules
|
||||
|
||||
log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLRules() (*[]tailcfg.FilterRule, error) {
|
||||
func (h *Headscale) generateACLRules() ([]tailcfg.FilterRule, error) {
|
||||
rules := []tailcfg.FilterRule{}
|
||||
|
||||
for i, a := range h.aclPolicy.ACLs {
|
||||
if a.Action != "accept" {
|
||||
return nil, errorInvalidAction
|
||||
for index, acl := range h.aclPolicy.ACLs {
|
||||
if acl.Action != "accept" {
|
||||
return nil, errInvalidAction
|
||||
}
|
||||
|
||||
r := tailcfg.FilterRule{}
|
||||
filterRule := tailcfg.FilterRule{}
|
||||
|
||||
srcIPs := []string{}
|
||||
for j, u := range a.Users {
|
||||
srcs, err := h.generateACLPolicySrcIP(u)
|
||||
for innerIndex, user := range acl.Users {
|
||||
srcs, err := h.generateACLPolicySrcIP(user)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, User %d", i, j)
|
||||
Msgf("Error parsing ACL %d, User %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
srcIPs = append(srcIPs, *srcs...)
|
||||
srcIPs = append(srcIPs, srcs...)
|
||||
}
|
||||
r.SrcIPs = srcIPs
|
||||
filterRule.SrcIPs = srcIPs
|
||||
|
||||
destPorts := []tailcfg.NetPortRange{}
|
||||
for j, d := range a.Ports {
|
||||
dests, err := h.generateACLPolicyDestPorts(d)
|
||||
for innerIndex, ports := range acl.Ports {
|
||||
dests, err := h.generateACLPolicyDestPorts(ports)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Msgf("Error parsing ACL %d, Port %d", i, j)
|
||||
Msgf("Error parsing ACL %d, Port %d", index, innerIndex)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
destPorts = append(destPorts, *dests...)
|
||||
destPorts = append(destPorts, dests...)
|
||||
}
|
||||
|
||||
rules = append(rules, tailcfg.FilterRule{
|
||||
@@ -92,17 +121,19 @@ func (h *Headscale) generateACLRules() (*[]tailcfg.FilterRule, error) {
|
||||
})
|
||||
}
|
||||
|
||||
return &rules, nil
|
||||
return rules, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicySrcIP(u string) (*[]string, error) {
|
||||
func (h *Headscale) generateACLPolicySrcIP(u string) ([]string, error) {
|
||||
return h.expandAlias(u)
|
||||
}
|
||||
|
||||
func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRange, error) {
|
||||
func (h *Headscale) generateACLPolicyDestPorts(
|
||||
d string,
|
||||
) ([]tailcfg.NetPortRange, error) {
|
||||
tokens := strings.Split(d, ":")
|
||||
if len(tokens) < 2 || len(tokens) > 3 {
|
||||
return nil, errorInvalidPortFormat
|
||||
if len(tokens) < expectedTokenItems || len(tokens) > 3 {
|
||||
return nil, errInvalidPortFormat
|
||||
}
|
||||
|
||||
var alias string
|
||||
@@ -112,7 +143,7 @@ func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRang
|
||||
// tag:montreal-webserver:80,443
|
||||
// tag:api-server:443
|
||||
// example-host-1:*
|
||||
if len(tokens) == 2 {
|
||||
if len(tokens) == expectedTokenItems {
|
||||
alias = tokens[0]
|
||||
} else {
|
||||
alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
|
||||
@@ -128,7 +159,7 @@ func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRang
|
||||
}
|
||||
|
||||
dests := []tailcfg.NetPortRange{}
|
||||
for _, d := range *expanded {
|
||||
for _, d := range expanded {
|
||||
for _, p := range *ports {
|
||||
pr := tailcfg.NetPortRange{
|
||||
IP: d,
|
||||
@@ -137,34 +168,36 @@ func (h *Headscale) generateACLPolicyDestPorts(d string) (*[]tailcfg.NetPortRang
|
||||
dests = append(dests, pr)
|
||||
}
|
||||
}
|
||||
return &dests, nil
|
||||
|
||||
return dests, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) expandAlias(s string) (*[]string, error) {
|
||||
if s == "*" {
|
||||
return &[]string{"*"}, nil
|
||||
func (h *Headscale) expandAlias(alias string) ([]string, error) {
|
||||
if alias == "*" {
|
||||
return []string{"*"}, nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "group:") {
|
||||
if _, ok := h.aclPolicy.Groups[s]; !ok {
|
||||
return nil, errorInvalidGroup
|
||||
if strings.HasPrefix(alias, "group:") {
|
||||
if _, ok := h.aclPolicy.Groups[alias]; !ok {
|
||||
return nil, errInvalidGroup
|
||||
}
|
||||
ips := []string{}
|
||||
for _, n := range h.aclPolicy.Groups[s] {
|
||||
for _, n := range h.aclPolicy.Groups[alias] {
|
||||
nodes, err := h.ListMachinesInNamespace(n)
|
||||
if err != nil {
|
||||
return nil, errorInvalidNamespace
|
||||
return nil, errInvalidNamespace
|
||||
}
|
||||
for _, node := range *nodes {
|
||||
ips = append(ips, node.IPAddress)
|
||||
for _, node := range nodes {
|
||||
ips = append(ips, node.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
}
|
||||
return &ips, nil
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(s, "tag:") {
|
||||
if _, ok := h.aclPolicy.TagOwners[s]; !ok {
|
||||
return nil, errorInvalidTag
|
||||
if strings.HasPrefix(alias, "tag:") {
|
||||
if _, ok := h.aclPolicy.TagOwners[alias]; !ok {
|
||||
return nil, errInvalidTag
|
||||
}
|
||||
|
||||
// This will have HORRIBLE performance.
|
||||
@@ -174,10 +207,10 @@ func (h *Headscale) expandAlias(s string) (*[]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
ips := []string{}
|
||||
for _, m := range machines {
|
||||
for _, machine := range machines {
|
||||
hostinfo := tailcfg.Hostinfo{}
|
||||
if len(m.HostInfo) != 0 {
|
||||
hi, err := m.HostInfo.MarshalJSON()
|
||||
if len(machine.HostInfo) != 0 {
|
||||
hi, err := machine.HostInfo.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -188,69 +221,76 @@ func (h *Headscale) expandAlias(s string) (*[]string, error) {
|
||||
|
||||
// FIXME: Check TagOwners allows this
|
||||
for _, t := range hostinfo.RequestTags {
|
||||
if s[4:] == t {
|
||||
ips = append(ips, m.IPAddress)
|
||||
if alias[4:] == t {
|
||||
ips = append(ips, machine.IPAddresses.ToStringSlice()...)
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &ips, nil
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
n, err := h.GetNamespace(s)
|
||||
n, err := h.GetNamespace(alias)
|
||||
if err == nil {
|
||||
nodes, err := h.ListMachinesInNamespace(n.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips := []string{}
|
||||
for _, n := range *nodes {
|
||||
ips = append(ips, n.IPAddress)
|
||||
for _, n := range nodes {
|
||||
ips = append(ips, n.IPAddresses.ToStringSlice()...)
|
||||
}
|
||||
return &ips, nil
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
if h, ok := h.aclPolicy.Hosts[s]; ok {
|
||||
return &[]string{h.String()}, nil
|
||||
if h, ok := h.aclPolicy.Hosts[alias]; ok {
|
||||
return []string{h.String()}, nil
|
||||
}
|
||||
|
||||
ip, err := netaddr.ParseIP(s)
|
||||
ip, err := netaddr.ParseIP(alias)
|
||||
if err == nil {
|
||||
return &[]string{ip.String()}, nil
|
||||
return []string{ip.String()}, nil
|
||||
}
|
||||
|
||||
cidr, err := netaddr.ParseIPPrefix(s)
|
||||
cidr, err := netaddr.ParseIPPrefix(alias)
|
||||
if err == nil {
|
||||
return &[]string{cidr.String()}, nil
|
||||
return []string{cidr.String()}, nil
|
||||
}
|
||||
|
||||
return nil, errorInvalidUserSection
|
||||
return nil, errInvalidUserSection
|
||||
}
|
||||
|
||||
func (h *Headscale) expandPorts(s string) (*[]tailcfg.PortRange, error) {
|
||||
if s == "*" {
|
||||
return &[]tailcfg.PortRange{{First: 0, Last: 65535}}, nil
|
||||
func (h *Headscale) expandPorts(portsStr string) (*[]tailcfg.PortRange, error) {
|
||||
if portsStr == "*" {
|
||||
return &[]tailcfg.PortRange{
|
||||
{First: portRangeBegin, Last: portRangeEnd},
|
||||
}, nil
|
||||
}
|
||||
|
||||
ports := []tailcfg.PortRange{}
|
||||
for _, p := range strings.Split(s, ",") {
|
||||
rang := strings.Split(p, "-")
|
||||
if len(rang) == 1 {
|
||||
pi, err := strconv.ParseUint(rang[0], 10, 16)
|
||||
for _, portStr := range strings.Split(portsStr, ",") {
|
||||
rang := strings.Split(portStr, "-")
|
||||
switch len(rang) {
|
||||
case 1:
|
||||
port, err := strconv.ParseUint(rang[0], Base10, BitSize16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ports = append(ports, tailcfg.PortRange{
|
||||
First: uint16(pi),
|
||||
Last: uint16(pi),
|
||||
First: uint16(port),
|
||||
Last: uint16(port),
|
||||
})
|
||||
} else if len(rang) == 2 {
|
||||
start, err := strconv.ParseUint(rang[0], 10, 16)
|
||||
|
||||
case expectedTokenItems:
|
||||
start, err := strconv.ParseUint(rang[0], Base10, BitSize16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
last, err := strconv.ParseUint(rang[1], 10, 16)
|
||||
last, err := strconv.ParseUint(rang[1], Base10, BitSize16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -258,9 +298,11 @@ func (h *Headscale) expandPorts(s string) (*[]tailcfg.PortRange, error) {
|
||||
First: uint16(start),
|
||||
Last: uint16(last),
|
||||
})
|
||||
} else {
|
||||
return nil, errorInvalidPortFormat
|
||||
|
||||
default:
|
||||
return nil, errInvalidPortFormat
|
||||
}
|
||||
}
|
||||
|
||||
return &ports, nil
|
||||
}
|
||||
|
||||
135
acls_test.go
135
acls_test.go
@@ -5,156 +5,163 @@ import (
|
||||
)
|
||||
|
||||
func (s *Suite) TestWrongPath(c *check.C) {
|
||||
err := h.LoadACLPolicy("asdfg")
|
||||
err := app.LoadACLPolicy("asdfg")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestBrokenHuJson(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/broken.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/broken.hujson")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
}
|
||||
|
||||
func (s *Suite) TestInvalidPolicyHuson(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/invalid.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/invalid.hujson")
|
||||
c.Assert(err, check.NotNil)
|
||||
c.Assert(err, check.Equals, errorEmptyPolicy)
|
||||
c.Assert(err, check.Equals, errEmptyPolicy)
|
||||
}
|
||||
|
||||
func (s *Suite) TestParseHosts(c *check.C) {
|
||||
var hs Hosts
|
||||
err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100","example-host-2": "100.100.101.100/24"}`))
|
||||
c.Assert(hs, check.NotNil)
|
||||
var hosts Hosts
|
||||
err := hosts.UnmarshalJSON(
|
||||
[]byte(
|
||||
`{"example-host-1": "100.100.100.100","example-host-2": "100.100.101.100/24"}`,
|
||||
),
|
||||
)
|
||||
c.Assert(hosts, check.NotNil)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestParseInvalidCIDR(c *check.C) {
|
||||
var hs Hosts
|
||||
err := hs.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100/42"}`))
|
||||
c.Assert(hs, check.IsNil)
|
||||
var hosts Hosts
|
||||
err := hosts.UnmarshalJSON([]byte(`{"example-host-1": "100.100.100.100/42"}`))
|
||||
c.Assert(hosts, check.IsNil)
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestRuleInvalidGeneration(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_invalid.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_invalid.hujson")
|
||||
c.Assert(err, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestBasicRule(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_1.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortRange(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_range.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(5400))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(5400))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(5500))
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortWildcard(c *check.C) {
|
||||
err := h.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
|
||||
err := app.LoadACLPolicy("./tests/acls/acl_policy_basic_wildcards.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Equals, "*")
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert(rules[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Equals, "*")
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortNamespace(c *check.C) {
|
||||
n, err := h.CreateNamespace("testnamespace")
|
||||
namespace, err := app.CreateNamespace("testnamespace")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine("testnamespace", "testmachine")
|
||||
_, err = app.GetMachine("testnamespace", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
ip, _ := h.getAvailableIP()
|
||||
m := Machine{
|
||||
ips, _ := app.getAvailableIPs()
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
NamespaceID: namespace.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: ip.String(),
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: ips,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
app.db.Save(&machine)
|
||||
|
||||
err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_namespace_as_user.hujson")
|
||||
err = app.LoadACLPolicy(
|
||||
"./tests/acls/acl_policy_basic_namespace_as_user.hujson",
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert(rules[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert(len(ips), check.Equals, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Equals, ips[0].String())
|
||||
}
|
||||
|
||||
func (s *Suite) TestPortGroup(c *check.C) {
|
||||
n, err := h.CreateNamespace("testnamespace")
|
||||
namespace, err := app.CreateNamespace("testnamespace")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
pak, err := h.CreatePreAuthKey(n.Name, false, false, nil)
|
||||
pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = h.GetMachine("testnamespace", "testmachine")
|
||||
_, err = app.GetMachine("testnamespace", "testmachine")
|
||||
c.Assert(err, check.NotNil)
|
||||
ip, _ := h.getAvailableIP()
|
||||
m := Machine{
|
||||
ips, _ := app.getAvailableIPs()
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "foo",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
NamespaceID: namespace.ID,
|
||||
Registered: true,
|
||||
RegisterMethod: "authKey",
|
||||
IPAddress: ip.String(),
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: ips,
|
||||
AuthKeyID: uint(pak.ID),
|
||||
}
|
||||
h.db.Save(&m)
|
||||
app.db.Save(&machine)
|
||||
|
||||
err = h.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
|
||||
err = app.LoadACLPolicy("./tests/acls/acl_policy_basic_groups.hujson")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
rules, err := h.generateACLRules()
|
||||
rules, err := app.generateACLRules()
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(rules, check.NotNil)
|
||||
|
||||
c.Assert(*rules, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert((*rules)[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert((*rules)[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert((*rules)[0].SrcIPs[0], check.Equals, ip.String())
|
||||
c.Assert(rules, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts, check.HasLen, 1)
|
||||
c.Assert(rules[0].DstPorts[0].Ports.First, check.Equals, uint16(0))
|
||||
c.Assert(rules[0].DstPorts[0].Ports.Last, check.Equals, uint16(65535))
|
||||
c.Assert(rules[0].SrcIPs, check.HasLen, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Not(check.Equals), "not an ip")
|
||||
c.Assert(len(ips), check.Equals, 1)
|
||||
c.Assert(rules[0].SrcIPs[0], check.Equals, ips[0].String())
|
||||
}
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/tailscale/hujson"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
// ACLPolicy represents a Tailscale ACL Policy
|
||||
// ACLPolicy represents a Tailscale ACL Policy.
|
||||
type ACLPolicy struct {
|
||||
Groups Groups `json:"Groups"`
|
||||
Hosts Hosts `json:"Hosts"`
|
||||
@@ -16,55 +17,63 @@ type ACLPolicy struct {
|
||||
Tests []ACLTest `json:"Tests"`
|
||||
}
|
||||
|
||||
// ACL is a basic rule for the ACL Policy
|
||||
// ACL is a basic rule for the ACL Policy.
|
||||
type ACL struct {
|
||||
Action string `json:"Action"`
|
||||
Users []string `json:"Users"`
|
||||
Ports []string `json:"Ports"`
|
||||
}
|
||||
|
||||
// Groups references a series of alias in the ACL rules
|
||||
// Groups references a series of alias in the ACL rules.
|
||||
type Groups map[string][]string
|
||||
|
||||
// Hosts are alias for IP addresses or subnets
|
||||
// Hosts are alias for IP addresses or subnets.
|
||||
type Hosts map[string]netaddr.IPPrefix
|
||||
|
||||
// TagOwners specify what users (namespaces?) are allow to use certain tags
|
||||
// TagOwners specify what users (namespaces?) are allow to use certain tags.
|
||||
type TagOwners map[string][]string
|
||||
|
||||
// ACLTest is not implemented, but should be use to check if a certain rule is allowed
|
||||
// ACLTest is not implemented, but should be use to check if a certain rule is allowed.
|
||||
type ACLTest struct {
|
||||
User string `json:"User"`
|
||||
Allow []string `json:"Allow"`
|
||||
Deny []string `json:"Deny,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects
|
||||
func (h *Hosts) UnmarshalJSON(data []byte) error {
|
||||
hosts := Hosts{}
|
||||
hs := make(map[string]string)
|
||||
err := hujson.Unmarshal(data, &hs)
|
||||
// UnmarshalJSON allows to parse the Hosts directly into netaddr objects.
|
||||
func (hosts *Hosts) UnmarshalJSON(data []byte) error {
|
||||
newHosts := Hosts{}
|
||||
hostIPPrefixMap := make(map[string]string)
|
||||
ast, err := hujson.Parse(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range hs {
|
||||
if !strings.Contains(v, "/") {
|
||||
v = v + "/32"
|
||||
ast.Standardize()
|
||||
data = ast.Pack()
|
||||
err = json.Unmarshal(data, &hostIPPrefixMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for host, prefixStr := range hostIPPrefixMap {
|
||||
if !strings.Contains(prefixStr, "/") {
|
||||
prefixStr += "/32"
|
||||
}
|
||||
prefix, err := netaddr.ParseIPPrefix(v)
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hosts[k] = prefix
|
||||
newHosts[host] = prefix
|
||||
}
|
||||
*h = hosts
|
||||
*hosts = newHosts
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsZero is perhaps a bit naive here
|
||||
func (p ACLPolicy) IsZero() bool {
|
||||
if len(p.Groups) == 0 && len(p.Hosts) == 0 && len(p.ACLs) == 0 {
|
||||
// IsZero is perhaps a bit naive here.
|
||||
func (policy ACLPolicy) IsZero() bool {
|
||||
if len(policy.Groups) == 0 && len(policy.Hosts) == 0 && len(policy.ACLs) == 0 {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
677
app.go
677
app.go
@@ -1,32 +1,79 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
|
||||
"github.com/coreos/go-oidc/v3/oidc"
|
||||
"github.com/gin-gonic/gin"
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/patrickmn/go-cache"
|
||||
zerolog "github.com/philip-bui/grpc-zerolog"
|
||||
zl "github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/soheilhy/cmux"
|
||||
ginprometheus "github.com/zsais/go-gin-prometheus"
|
||||
"golang.org/x/crypto/acme"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/reflection"
|
||||
"google.golang.org/grpc/status"
|
||||
"gorm.io/gorm"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/wgkey"
|
||||
"tailscale.com/types/dnstype"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration
|
||||
const (
|
||||
AuthPrefix = "Bearer "
|
||||
Postgres = "postgres"
|
||||
Sqlite = "sqlite3"
|
||||
updateInterval = 5000
|
||||
HTTPReadTimeout = 30 * time.Second
|
||||
privateKeyFileMode = 0o600
|
||||
|
||||
requestedExpiryCacheExpiration = time.Minute * 5
|
||||
requestedExpiryCacheCleanupInterval = time.Minute * 10
|
||||
|
||||
errUnsupportedDatabase = Error("unsupported DB")
|
||||
errUnsupportedLetsEncryptChallengeType = Error(
|
||||
"unknown value for Lets Encrypt challenge type",
|
||||
)
|
||||
)
|
||||
|
||||
// Config contains the initial Headscale configuration.
|
||||
type Config struct {
|
||||
ServerURL string
|
||||
Addr string
|
||||
PrivateKeyPath string
|
||||
DerpMap *tailcfg.DERPMap
|
||||
EphemeralNodeInactivityTimeout time.Duration
|
||||
IPPrefix netaddr.IPPrefix
|
||||
IPPrefixes []netaddr.IPPrefix
|
||||
PrivateKeyPath string
|
||||
BaseDomain string
|
||||
|
||||
DERP DERPConfig
|
||||
|
||||
DBtype string
|
||||
DBpath string
|
||||
@@ -43,73 +90,137 @@ type Config struct {
|
||||
|
||||
TLSCertPath string
|
||||
TLSKeyPath string
|
||||
|
||||
ACMEURL string
|
||||
ACMEEmail string
|
||||
|
||||
DNSConfig *tailcfg.DNSConfig
|
||||
|
||||
UnixSocket string
|
||||
UnixSocketPermission fs.FileMode
|
||||
|
||||
OIDC OIDCConfig
|
||||
|
||||
CLI CLIConfig
|
||||
}
|
||||
|
||||
// Headscale represents the base app of the service
|
||||
type OIDCConfig struct {
|
||||
Issuer string
|
||||
ClientID string
|
||||
ClientSecret string
|
||||
MatchMap map[string]string
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
Address string
|
||||
APIKey string
|
||||
Insecure bool
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// Headscale represents the base app of the service.
|
||||
type Headscale struct {
|
||||
cfg Config
|
||||
db *gorm.DB
|
||||
dbString string
|
||||
dbType string
|
||||
dbDebug bool
|
||||
publicKey *wgkey.Key
|
||||
privateKey *wgkey.Private
|
||||
privateKey *key.MachinePrivate
|
||||
|
||||
DERPMap *tailcfg.DERPMap
|
||||
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules *[]tailcfg.FilterRule
|
||||
aclRules []tailcfg.FilterRule
|
||||
|
||||
clientsPolling sync.Map
|
||||
lastStateChange sync.Map
|
||||
|
||||
oidcProvider *oidc.Provider
|
||||
oauth2Config *oauth2.Config
|
||||
oidcStateCache *cache.Cache
|
||||
|
||||
requestedExpiryCache *cache.Cache
|
||||
}
|
||||
|
||||
// NewHeadscale returns the Headscale app
|
||||
// NewHeadscale returns the Headscale app.
|
||||
func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
content, err := os.ReadFile(cfg.PrivateKeyPath)
|
||||
privKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to read or create private key: %w", err)
|
||||
}
|
||||
privKey, err := wgkey.ParsePrivate(string(content))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubKey := privKey.Public()
|
||||
|
||||
var dbString string
|
||||
switch cfg.DBtype {
|
||||
case "postgres":
|
||||
dbString = fmt.Sprintf("host=%s port=%d dbname=%s user=%s password=%s sslmode=disable", cfg.DBhost,
|
||||
cfg.DBport, cfg.DBname, cfg.DBuser, cfg.DBpass)
|
||||
case "sqlite3":
|
||||
case Postgres:
|
||||
dbString = fmt.Sprintf(
|
||||
"host=%s port=%d dbname=%s user=%s password=%s sslmode=disable",
|
||||
cfg.DBhost,
|
||||
cfg.DBport,
|
||||
cfg.DBname,
|
||||
cfg.DBuser,
|
||||
cfg.DBpass,
|
||||
)
|
||||
case Sqlite:
|
||||
dbString = cfg.DBpath
|
||||
default:
|
||||
return nil, errors.New("unsupported DB")
|
||||
return nil, errUnsupportedDatabase
|
||||
}
|
||||
|
||||
h := Headscale{
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
publicKey: &pubKey,
|
||||
aclRules: &tailcfg.FilterAllowAll, // default allowall
|
||||
requestedExpiryCache := cache.New(
|
||||
requestedExpiryCacheExpiration,
|
||||
requestedExpiryCacheCleanupInterval,
|
||||
)
|
||||
|
||||
app := Headscale{
|
||||
cfg: cfg,
|
||||
dbType: cfg.DBtype,
|
||||
dbString: dbString,
|
||||
privateKey: privKey,
|
||||
aclRules: tailcfg.FilterAllowAll, // default allowall
|
||||
requestedExpiryCache: requestedExpiryCache,
|
||||
}
|
||||
|
||||
err = h.initDB()
|
||||
err = app.initDB()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &h, nil
|
||||
if cfg.OIDC.Issuer != "" {
|
||||
err = app.initOIDC()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if app.cfg.DNSConfig != nil && app.cfg.DNSConfig.Proxied { // if MagicDNS
|
||||
magicDNSDomains := generateMagicDNSRootDomains(app.cfg.IPPrefixes)
|
||||
// we might have routes already from Split DNS
|
||||
if app.cfg.DNSConfig.Routes == nil {
|
||||
app.cfg.DNSConfig.Routes = make(map[string][]dnstype.Resolver)
|
||||
}
|
||||
for _, d := range magicDNSDomains {
|
||||
app.cfg.DNSConfig.Routes[d.WithoutTrailingDot()] = nil
|
||||
}
|
||||
}
|
||||
|
||||
return &app, nil
|
||||
}
|
||||
|
||||
// Redirect to our TLS url
|
||||
// Redirect to our TLS url.
|
||||
func (h *Headscale) redirect(w http.ResponseWriter, req *http.Request) {
|
||||
target := h.cfg.ServerURL + req.URL.RequestURI()
|
||||
http.Redirect(w, req, target, http.StatusFound)
|
||||
}
|
||||
|
||||
// ExpireEphemeralNodes deletes ephemeral machine records that have not been
|
||||
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout
|
||||
func (h *Headscale) ExpireEphemeralNodes(milliSeconds int64) {
|
||||
// expireEphemeralNodes deletes ephemeral machine records that have not been
|
||||
// seen for longer than h.cfg.EphemeralNodeInactivityTimeout.
|
||||
func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
h.expireEphemeralNodesWorker()
|
||||
@@ -120,28 +231,46 @@ func (h *Headscale) expireEphemeralNodesWorker() {
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error listing namespaces")
|
||||
|
||||
return
|
||||
}
|
||||
for _, ns := range *namespaces {
|
||||
machines, err := h.ListMachinesInNamespace(ns.Name)
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
machines, err := h.ListMachinesInNamespace(namespace.Name)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("namespace", ns.Name).Msg("Error listing machines in namespace")
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("namespace", namespace.Name).
|
||||
Msg("Error listing machines in namespace")
|
||||
|
||||
return
|
||||
}
|
||||
for _, m := range *machines {
|
||||
if m.AuthKey != nil && m.LastSeen != nil && m.AuthKey.Ephemeral && time.Now().After(m.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
log.Info().Str("machine", m.Name).Msg("Ephemeral client removed from database")
|
||||
err = h.db.Unscoped().Delete(m).Error
|
||||
|
||||
for _, machine := range machines {
|
||||
if machine.AuthKey != nil && machine.LastSeen != nil &&
|
||||
machine.AuthKey.Ephemeral &&
|
||||
time.Now().
|
||||
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
|
||||
log.Info().
|
||||
Str("machine", machine.Name).
|
||||
Msg("Ephemeral client removed from database")
|
||||
|
||||
err = h.db.Unscoped().Delete(machine).Error
|
||||
if err != nil {
|
||||
log.Error().Err(err).Str("machine", m.Name).Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Name).
|
||||
Msg("🤮 Cannot delete ephemeral machine from the database")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// WatchForKVUpdates checks the KV DB table for requests to perform tailnet upgrades
|
||||
// This is a way to communitate the CLI with the headscale server
|
||||
// This is a way to communitate the CLI with the headscale server.
|
||||
func (h *Headscale) watchForKVUpdates(milliSeconds int64) {
|
||||
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
|
||||
for range ticker.C {
|
||||
@@ -154,61 +283,465 @@ func (h *Headscale) watchForKVUpdatesWorker() {
|
||||
// more functions will come here in the future
|
||||
}
|
||||
|
||||
// Serve launches a GIN server with the Headscale API
|
||||
func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
|
||||
req interface{},
|
||||
info *grpc.UnaryServerInfo,
|
||||
handler grpc.UnaryHandler) (interface{}, error) {
|
||||
// Check if the request is coming from the on-server client.
|
||||
// This is not secure, but it is to maintain maintainability
|
||||
// with the "legacy" database-based client
|
||||
// It is also neede for grpc-gateway to be able to connect to
|
||||
// the server
|
||||
client, _ := peer.FromContext(ctx)
|
||||
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Client is trying to authenticate")
|
||||
|
||||
meta, ok := metadata.FromIncomingContext(ctx)
|
||||
if !ok {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Retrieving metadata is failed")
|
||||
|
||||
return ctx, status.Errorf(
|
||||
codes.InvalidArgument,
|
||||
"Retrieving metadata is failed",
|
||||
)
|
||||
}
|
||||
|
||||
authHeader, ok := meta["authorization"]
|
||||
if !ok {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg("Authorization token is not supplied")
|
||||
|
||||
return ctx, status.Errorf(
|
||||
codes.Unauthenticated,
|
||||
"Authorization token is not supplied",
|
||||
)
|
||||
}
|
||||
|
||||
token := authHeader[0]
|
||||
|
||||
if !strings.HasPrefix(token, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", client.Addr.String()).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
|
||||
return ctx, status.Error(
|
||||
codes.Unauthenticated,
|
||||
`missing "Bearer " prefix in "Authorization" header`,
|
||||
)
|
||||
}
|
||||
|
||||
// TODO(kradalby): Implement API key backend:
|
||||
// - Table in the DB
|
||||
// - Key name
|
||||
// - Encrypted
|
||||
// - Expiry
|
||||
//
|
||||
// Currently all other than localhost traffic is unauthorized, this is intentional to allow
|
||||
// us to make use of gRPC for our CLI, but not having to implement any of the remote capabilities
|
||||
// and API key auth
|
||||
return ctx, status.Error(
|
||||
codes.Unauthenticated,
|
||||
"Authentication is not implemented yet",
|
||||
)
|
||||
|
||||
// if strings.TrimPrefix(token, AUTH_PREFIX) != a.Token {
|
||||
// log.Error().Caller().Str("client_address", p.Addr.String()).Msg("invalid token")
|
||||
// return ctx, status.Error(codes.Unauthenticated, "invalid token")
|
||||
// }
|
||||
|
||||
// return handler(ctx, req)
|
||||
}
|
||||
|
||||
func (h *Headscale) httpAuthenticationMiddleware(ctx *gin.Context) {
|
||||
log.Trace().
|
||||
Caller().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg("HTTP authentication invoked")
|
||||
|
||||
authHeader := ctx.GetHeader("authorization")
|
||||
|
||||
if !strings.HasPrefix(authHeader, AuthPrefix) {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("client_address", ctx.ClientIP()).
|
||||
Msg(`missing "Bearer " prefix in "Authorization" header`)
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.AbortWithStatus(http.StatusUnauthorized)
|
||||
|
||||
// TODO(kradalby): Implement API key backend
|
||||
// Currently all traffic is unauthorized, this is intentional to allow
|
||||
// us to make use of gRPC for our CLI, but not having to implement any of the remote capabilities
|
||||
// and API key auth
|
||||
//
|
||||
// if strings.TrimPrefix(authHeader, AUTH_PREFIX) != a.Token {
|
||||
// log.Error().Caller().Str("client_address", c.ClientIP()).Msg("invalid token")
|
||||
// c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error", "unauthorized"})
|
||||
|
||||
// return
|
||||
// }
|
||||
|
||||
// c.Next()
|
||||
}
|
||||
|
||||
// ensureUnixSocketIsAbsent will check if the given path for headscales unix socket is clear
|
||||
// and will remove it if it is not.
|
||||
func (h *Headscale) ensureUnixSocketIsAbsent() error {
|
||||
// File does not exist, all fine
|
||||
if _, err := os.Stat(h.cfg.UnixSocket); errors.Is(err, os.ErrNotExist) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return os.Remove(h.cfg.UnixSocket)
|
||||
}
|
||||
|
||||
// Serve launches a GIN server with the Headscale API.
|
||||
func (h *Headscale) Serve() error {
|
||||
r := gin.Default()
|
||||
r.GET("/key", h.KeyHandler)
|
||||
r.GET("/register", h.RegisterWebAPI)
|
||||
r.POST("/machine/:id/map", h.PollNetMapHandler)
|
||||
r.POST("/machine/:id", h.RegistrationHandler)
|
||||
var err error
|
||||
|
||||
go h.watchForKVUpdates(5000)
|
||||
ctx := context.Background()
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
defer cancel()
|
||||
|
||||
err = h.ensureUnixSocketIsAbsent()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to remove old socket file: %w", err)
|
||||
}
|
||||
|
||||
socketListener, err := net.Listen("unix", h.cfg.UnixSocket)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set up gRPC socket: %w", err)
|
||||
}
|
||||
|
||||
// Change socket permissions
|
||||
if err := os.Chmod(h.cfg.UnixSocket, h.cfg.UnixSocketPermission); err != nil {
|
||||
return fmt.Errorf("failed change permission of gRPC socket: %w", err)
|
||||
}
|
||||
|
||||
// Handle common process-killing signals so we can gracefully shut down:
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, os.Interrupt, syscall.SIGTERM)
|
||||
go func(c chan os.Signal) {
|
||||
// Wait for a SIGINT or SIGKILL:
|
||||
sig := <-c
|
||||
log.Printf("Caught signal %s: shutting down.", sig)
|
||||
// Stop listening (and unlink the socket if unix type):
|
||||
socketListener.Close()
|
||||
// And we're done:
|
||||
os.Exit(0)
|
||||
}(sigc)
|
||||
|
||||
networkListener, err := net.Listen("tcp", h.cfg.Addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to bind to TCP address: %w", err)
|
||||
}
|
||||
|
||||
// Create the cmux object that will multiplex 2 protocols on the same port.
|
||||
// The two following listeners will be served on the same port below gracefully.
|
||||
networkMutex := cmux.New(networkListener)
|
||||
// Match gRPC requests here
|
||||
grpcListener := networkMutex.MatchWithWriters(
|
||||
cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"),
|
||||
cmux.HTTP2MatchHeaderFieldSendSettings(
|
||||
"content-type",
|
||||
"application/grpc+proto",
|
||||
),
|
||||
)
|
||||
// Otherwise match regular http requests.
|
||||
httpListener := networkMutex.Match(cmux.Any())
|
||||
|
||||
grpcGatewayMux := runtime.NewServeMux()
|
||||
|
||||
// Make the grpc-gateway connect to grpc over socket
|
||||
grpcGatewayConn, err := grpc.Dial(
|
||||
h.cfg.UnixSocket,
|
||||
[]grpc.DialOption{
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithContextDialer(GrpcSocketDialer),
|
||||
}...,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Connect to the gRPC server over localhost to skip
|
||||
// the authentication.
|
||||
err = v1.RegisterHeadscaleServiceHandler(ctx, grpcGatewayMux, grpcGatewayConn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
router := gin.Default()
|
||||
|
||||
prometheus := ginprometheus.NewPrometheus("gin")
|
||||
prometheus.Use(router)
|
||||
|
||||
router.GET(
|
||||
"/health",
|
||||
func(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"healthy": "ok"}) },
|
||||
)
|
||||
router.GET("/key", h.KeyHandler)
|
||||
router.GET("/register", h.RegisterWebAPI)
|
||||
router.POST("/machine/:id/map", h.PollNetMapHandler)
|
||||
router.POST("/machine/:id", h.RegistrationHandler)
|
||||
router.GET("/oidc/register/:mkey", h.RegisterOIDC)
|
||||
router.GET("/oidc/callback", h.OIDCCallback)
|
||||
router.GET("/apple", h.AppleMobileConfig)
|
||||
router.GET("/apple/:platform", h.ApplePlatformConfig)
|
||||
router.GET("/swagger", SwaggerUI)
|
||||
router.GET("/swagger/v1/openapiv2.json", SwaggerAPIv1)
|
||||
|
||||
api := router.Group("/api")
|
||||
api.Use(h.httpAuthenticationMiddleware)
|
||||
{
|
||||
api.Any("/v1/*any", gin.WrapF(grpcGatewayMux.ServeHTTP))
|
||||
}
|
||||
|
||||
router.NoRoute(stdoutHandler)
|
||||
|
||||
// Fetch an initial DERP Map before we start serving
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
|
||||
if h.cfg.DERP.AutoUpdate {
|
||||
derpMapCancelChannel := make(chan struct{})
|
||||
defer func() { derpMapCancelChannel <- struct{}{} }()
|
||||
go h.scheduledDERPMapUpdateWorker(derpMapCancelChannel)
|
||||
}
|
||||
|
||||
// I HATE THIS
|
||||
go h.watchForKVUpdates(updateInterval)
|
||||
go h.expireEphemeralNodes(updateInterval)
|
||||
|
||||
httpServer := &http.Server{
|
||||
Addr: h.cfg.Addr,
|
||||
Handler: router,
|
||||
ReadTimeout: HTTPReadTimeout,
|
||||
// Go does not handle timeouts in HTTP very well, and there is
|
||||
// no good way to handle streaming timeouts, therefore we need to
|
||||
// keep this at unlimited and be careful to clean up connections
|
||||
// https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/#aboutstreaming
|
||||
WriteTimeout: 0,
|
||||
}
|
||||
|
||||
if zl.GlobalLevel() == zl.TraceLevel {
|
||||
zerolog.RespLog = true
|
||||
} else {
|
||||
zerolog.RespLog = false
|
||||
}
|
||||
|
||||
grpcOptions := []grpc.ServerOption{
|
||||
grpc.UnaryInterceptor(
|
||||
grpc_middleware.ChainUnaryServer(
|
||||
h.grpcAuthenticationInterceptor,
|
||||
zerolog.NewUnaryServerInterceptor(),
|
||||
),
|
||||
),
|
||||
}
|
||||
|
||||
tlsConfig, err := h.getTLSSettings()
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Failed to set up TLS configuration")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if tlsConfig != nil {
|
||||
httpServer.TLSConfig = tlsConfig
|
||||
|
||||
grpcOptions = append(grpcOptions, grpc.Creds(credentials.NewTLS(tlsConfig)))
|
||||
}
|
||||
|
||||
grpcServer := grpc.NewServer(grpcOptions...)
|
||||
|
||||
// Start the local gRPC server without TLS and without authentication
|
||||
grpcSocket := grpc.NewServer(zerolog.UnaryInterceptor())
|
||||
|
||||
v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))
|
||||
v1.RegisterHeadscaleServiceServer(grpcSocket, newHeadscaleV1APIServer(h))
|
||||
reflection.Register(grpcServer)
|
||||
reflection.Register(grpcSocket)
|
||||
|
||||
errorGroup := new(errgroup.Group)
|
||||
|
||||
errorGroup.Go(func() error { return grpcSocket.Serve(socketListener) })
|
||||
|
||||
// TODO(kradalby): Verify if we need the same TLS setup for gRPC as HTTP
|
||||
errorGroup.Go(func() error { return grpcServer.Serve(grpcListener) })
|
||||
|
||||
if tlsConfig != nil {
|
||||
errorGroup.Go(func() error {
|
||||
tlsl := tls.NewListener(httpListener, tlsConfig)
|
||||
|
||||
return httpServer.Serve(tlsl)
|
||||
})
|
||||
} else {
|
||||
errorGroup.Go(func() error { return httpServer.Serve(httpListener) })
|
||||
}
|
||||
|
||||
errorGroup.Go(func() error { return networkMutex.Serve() })
|
||||
|
||||
log.Info().
|
||||
Msgf("listening and serving (multiplexed HTTP and gRPC) on: %s", h.cfg.Addr)
|
||||
|
||||
return errorGroup.Wait()
|
||||
}
|
||||
|
||||
func (h *Headscale) getTLSSettings() (*tls.Config, error) {
	var err error
	if h.cfg.TLSLetsEncryptHostname != "" {
		if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
			log.Warn().
				Msg("Listening with TLS but ServerURL does not start with https://")
		}

		certManager := autocert.Manager{
			Prompt:     autocert.AcceptTOS,
			HostPolicy: autocert.HostWhitelist(h.cfg.TLSLetsEncryptHostname),
			Cache:      autocert.DirCache(h.cfg.TLSLetsEncryptCacheDir),
			Client: &acme.Client{
				DirectoryURL: h.cfg.ACMEURL,
			},
			Email: h.cfg.ACMEEmail,
		}

		switch h.cfg.TLSLetsEncryptChallengeType {
		case "TLS-ALPN-01":
			// Configuration via autocert with TLS-ALPN-01 (https://tools.ietf.org/html/rfc8737)
			// The RFC requires that the validation is done on port 443; in other words, headscale
			// must be reachable on port 443.
			return certManager.TLSConfig(), nil

		case "HTTP-01":
			// Configuration via autocert with HTTP-01. This requires listening on
			// port 80 for the certificate validation in addition to the headscale
			// service, which can be configured to run on any other port.
			go func() {
				log.Fatal().
					Err(http.ListenAndServe(h.cfg.TLSLetsEncryptListen, certManager.HTTPHandler(http.HandlerFunc(h.redirect)))).
					Msg("failed to set up a HTTP server")
			}()

			return certManager.TLSConfig(), nil

		default:
			return nil, errUnsupportedLetsEncryptChallengeType
		}
	} else if h.cfg.TLSCertPath == "" {
		if !strings.HasPrefix(h.cfg.ServerURL, "http://") {
			log.Warn().Msg("Listening without TLS but ServerURL does not start with http://")
		}

		return nil, err
	} else {
		if !strings.HasPrefix(h.cfg.ServerURL, "https://") {
			log.Warn().Msg("Listening with TLS but ServerURL does not start with https://")
		}

		tlsConfig := &tls.Config{
			ClientAuth:   tls.RequireAnyClientCert,
			NextProtos:   []string{"http/1.1"},
			Certificates: make([]tls.Certificate, 1),
			MinVersion:   tls.VersionTLS12,
		}
		tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(h.cfg.TLSCertPath, h.cfg.TLSKeyPath)

		return tlsConfig, err
	}
}

func (h *Headscale) setLastStateChangeToNow(namespace string) {
	now := time.Now().UTC()
	lastStateUpdate.WithLabelValues("", "headscale").Set(float64(now.Unix()))
	h.lastStateChange.Store(namespace, now)
}

func (h *Headscale) getLastStateChange(namespaces ...string) time.Time {
	times := []time.Time{}

	for _, namespace := range namespaces {
		if wrapped, ok := h.lastStateChange.Load(namespace); ok {
			lastChange, _ := wrapped.(time.Time)

			times = append(times, lastChange)
		}
	}

	sort.Slice(times, func(i, j int) bool {
		return times[i].After(times[j])
	})

	log.Trace().Msgf("Latest times %#v", times)

	if len(times) == 0 {
		return time.Now().UTC()
	} else {
		return times[0]
	}
}

func stdoutHandler(ctx *gin.Context) {
	body, _ := io.ReadAll(ctx.Request.Body)

	log.Trace().
		Interface("header", ctx.Request.Header).
		Interface("proto", ctx.Request.Proto).
		Interface("url", ctx.Request.URL).
		Bytes("body", body).
		Msg("Request did not match")
}

func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
	privateKey, err := os.ReadFile(path)
	if errors.Is(err, os.ErrNotExist) {
		log.Info().Str("path", path).Msg("No private key file at path, creating...")

		machineKey := key.NewMachine()

		machineKeyStr, err := machineKey.MarshalText()
		if err != nil {
			return nil, fmt.Errorf(
				"failed to convert private key to string for saving: %w",
				err,
			)
		}
		err = os.WriteFile(path, machineKeyStr, privateKeyFileMode)
		if err != nil {
			return nil, fmt.Errorf(
				"failed to save private key to disk: %w",
				err,
			)
		}

		return &machineKey, nil
	} else if err != nil {
		return nil, fmt.Errorf("failed to read private key file: %w", err)
	}

	trimmedPrivateKey := strings.TrimSpace(string(privateKey))
	privateKeyEnsurePrefix := PrivateKeyEnsurePrefix(trimmedPrivateKey)

	var machineKey key.MachinePrivate
	if err = machineKey.UnmarshalText([]byte(privateKeyEnsurePrefix)); err != nil {
		log.Info().
			Str("path", path).
			Msg("This might be due to a legacy (headscale pre-0.12) private key. " +
				"If the key is in WireGuard format, delete the key and restart headscale. " +
				"A new key will automatically be generated. All Tailscale clients will have to be restarted")

		return nil, fmt.Errorf("failed to parse private key: %w", err)
	}

	return &machineKey, nil
}

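The Serve function above multiplexes gRPC and plain HTTP on a single TCP port through cmux. The following is a minimal, standalone sketch of that pattern, not headscale's own code; the port, handler, and variable names are illustrative assumptions.

// Sketch: split one TCP listener into a gRPC listener and an HTTP listener with cmux.
package main

import (
	"fmt"
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
	"google.golang.org/grpc"
)

func main() {
	listener, err := net.Listen("tcp", ":8080") // illustrative port
	if err != nil {
		panic(err)
	}

	mux := cmux.New(listener)

	// gRPC is matched on the HTTP/2 content-type header; everything else is plain HTTP.
	grpcListener := mux.MatchWithWriters(
		cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"),
	)
	httpListener := mux.Match(cmux.Any())

	grpcServer := grpc.NewServer()
	httpServer := &http.Server{
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "ok")
		}),
	}

	go grpcServer.Serve(grpcListener)
	go httpServer.Serve(httpListener)

	// Serve blocks and dispatches accepted connections to the matched listeners.
	if err := mux.Serve(); err != nil {
		panic(err)
	}
}
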
23
app_test.go
@@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/patrickmn/go-cache"
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
@@ -17,8 +18,10 @@ var _ = check.Suite(&Suite{})
|
||||
|
||||
type Suite struct{}
|
||||
|
||||
var tmpDir string
|
||||
var h Headscale
|
||||
var (
|
||||
tmpDir string
|
||||
app Headscale
|
||||
)
|
||||
|
||||
func (s *Suite) SetUpTest(c *check.C) {
|
||||
s.ResetDB(c)
|
||||
@@ -38,21 +41,27 @@ func (s *Suite) ResetDB(c *check.C) {
|
||||
c.Fatal(err)
|
||||
}
|
||||
cfg := Config{
|
||||
IPPrefix: netaddr.MustParseIPPrefix("10.27.0.0/23"),
|
||||
IPPrefixes: []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("10.27.0.0/23"),
|
||||
},
|
||||
}
|
||||
|
||||
h = Headscale{
|
||||
app = Headscale{
|
||||
cfg: cfg,
|
||||
dbType: "sqlite3",
|
||||
dbString: tmpDir + "/headscale_test.db",
|
||||
requestedExpiryCache: cache.New(
|
||||
requestedExpiryCacheExpiration,
|
||||
requestedExpiryCacheCleanupInterval,
|
||||
),
|
||||
}
|
||||
err = h.initDB()
|
||||
err = app.initDB()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
db, err := h.openDB()
|
||||
db, err := app.openDB()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
h.db = db
|
||||
app.db = db
|
||||
}
|
||||
|
||||
266
apple_mobileconfig.go
Normal file
@@ -0,0 +1,266 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"html/template"
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gofrs/uuid"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// AppleMobileConfig shows a simple message in the browser to point to the CLI
|
||||
// Listens in /register.
|
||||
func (h *Headscale) AppleMobileConfig(ctx *gin.Context) {
|
||||
appleTemplate := template.Must(template.New("apple").Parse(`
|
||||
<html>
|
||||
<body>
|
||||
<h1>Apple configuration profiles</h1>
|
||||
<p>
|
||||
This page provides <a href="https://support.apple.com/guide/mdm/mdm-overview-mdmbf9e668/web">configuration profiles</a> for the official Tailscale clients for <a href="https://apps.apple.com/us/app/tailscale/id1470499037?ls=1">iOS</a> and <a href="https://apps.apple.com/ca/app/tailscale/id1475387142?mt=12">macOS</a>.
|
||||
</p>
|
||||
<p>
|
||||
The profiles will configure Tailscale.app to use {{.URL}} as its control server.
|
||||
</p>
|
||||
|
||||
<h3>Caution</h3>
|
||||
<p>You should always inspect the profile before installing it:</p>
|
||||
<!--
|
||||
<p><code>curl {{.URL}}/apple/ios</code></p>
|
||||
-->
|
||||
<p><code>curl {{.URL}}/apple/macos</code></p>
|
||||
|
||||
<h2>Profiles</h2>
|
||||
|
||||
<!--
|
||||
<h3>iOS</h3>
|
||||
<p>
|
||||
<a href="/apple/ios" download="headscale_ios.mobileconfig">iOS profile</a>
|
||||
</p>
|
||||
-->
|
||||
|
||||
<h3>macOS</h3>
|
||||
<p>Headscale can be set to the default server by installing a Headscale configuration profile:</p>
|
||||
<p>
|
||||
<a href="/apple/macos" download="headscale_macos.mobileconfig">macOS profile</a>
|
||||
</p>
|
||||
|
||||
<ol>
|
||||
<li>Download the profile, then open it. When it has been opened, there should be a notification that a profile can be installed</li>
|
||||
<li>Open System Preferences and go to "Profiles"</li>
|
||||
<li>Find and install the Headscale profile</li>
|
||||
<li>Restart Tailscale.app and log in</li>
|
||||
</ol>
|
||||
|
||||
<p>Or</p>
|
||||
<p>Use your terminal to configure the default setting for Tailscale by issuing:</p>
|
||||
<code>defaults write io.tailscale.ipn.macos ControlURL {{.URL}}</code>
|
||||
|
||||
<p>Restart Tailscale.app and log in.</p>
|
||||
|
||||
</body>
|
||||
</html>`))
|
||||
|
||||
config := map[string]interface{}{
|
||||
"URL": h.cfg.ServerURL,
|
||||
}
|
||||
|
||||
var payload bytes.Buffer
|
||||
if err := appleTemplate.Execute(&payload, config); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "AppleMobileConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple index template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple index template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(http.StatusOK, "text/html; charset=utf-8", payload.Bytes())
|
||||
}
|
||||
|
||||
func (h *Headscale) ApplePlatformConfig(ctx *gin.Context) {
|
||||
platform := ctx.Param("platform")
|
||||
|
||||
id, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Failed not create UUID")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Failed to create UUID"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
contentID, err := uuid.NewV4()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Failed not create UUID")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Failed to create UUID"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
platformConfig := AppleMobilePlatformConfig{
|
||||
UUID: contentID,
|
||||
URL: h.cfg.ServerURL,
|
||||
}
|
||||
|
||||
var payload bytes.Buffer
|
||||
|
||||
switch platform {
|
||||
case "macos":
|
||||
if err := macosTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple macOS template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple macOS template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
case "ios":
|
||||
if err := iosTemplate.Execute(&payload, platformConfig); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple iOS template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple iOS template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
default:
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Invalid platform, only ios and macos is supported"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
config := AppleMobileConfig{
|
||||
UUID: id,
|
||||
URL: h.cfg.ServerURL,
|
||||
Payload: payload.String(),
|
||||
}
|
||||
|
||||
var content bytes.Buffer
|
||||
if err := commonTemplate.Execute(&content, config); err != nil {
|
||||
log.Error().
|
||||
Str("handler", "ApplePlatformConfig").
|
||||
Err(err).
|
||||
Msg("Could not render Apple platform template")
|
||||
ctx.Data(
|
||||
http.StatusInternalServerError,
|
||||
"text/html; charset=utf-8",
|
||||
[]byte("Could not render Apple platform template"),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Data(
|
||||
http.StatusOK,
|
||||
"application/x-apple-aspen-config; charset=utf-8",
|
||||
content.Bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
type AppleMobileConfig struct {
|
||||
UUID uuid.UUID
|
||||
URL string
|
||||
Payload string
|
||||
}
|
||||
|
||||
type AppleMobilePlatformConfig struct {
|
||||
UUID uuid.UUID
|
||||
URL string
|
||||
}
|
||||
|
||||
var commonTemplate = template.Must(
|
||||
template.New("mobileconfig").Parse(`<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadDisplayName</key>
|
||||
<string>Headscale</string>
|
||||
<key>PayloadDescription</key>
|
||||
<string>Configure Tailscale login server to: {{.URL}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadRemovalDisallowed</key>
|
||||
<false/>
|
||||
<key>PayloadType</key>
|
||||
<string>Configuration</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadContent</key>
|
||||
<array>
|
||||
{{.Payload}}
|
||||
</array>
|
||||
</dict>
|
||||
</plist>`),
|
||||
)
|
||||
|
||||
var iosTemplate = template.Must(template.New("iosTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.ios</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadEnabled</key>
|
||||
<true/>
|
||||
|
||||
<key>ControlURL</key>
|
||||
<string>{{.URL}}</string>
|
||||
</dict>
|
||||
`))
|
||||
|
||||
var macosTemplate = template.Must(template.New("macosTemplate").Parse(`
|
||||
<dict>
|
||||
<key>PayloadType</key>
|
||||
<string>io.tailscale.ipn.macos</string>
|
||||
<key>PayloadUUID</key>
|
||||
<string>{{.UUID}}</string>
|
||||
<key>PayloadIdentifier</key>
|
||||
<string>com.github.juanfont.headscale</string>
|
||||
<key>PayloadVersion</key>
|
||||
<integer>1</integer>
|
||||
<key>PayloadEnabled</key>
|
||||
<true/>
|
||||
|
||||
<key>ControlURL</key>
|
||||
<string>{{.URL}}</string>
|
||||
</dict>
|
||||
`))
|
||||
21
buf.gen.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
version: v1
|
||||
plugins:
|
||||
- name: go
|
||||
out: gen/go
|
||||
opt:
|
||||
- paths=source_relative
|
||||
- name: go-grpc
|
||||
out: gen/go
|
||||
opt:
|
||||
- paths=source_relative
|
||||
- name: grpc-gateway
|
||||
out: gen/go
|
||||
opt:
|
||||
- paths=source_relative
|
||||
- generate_unbound_methods=true
|
||||
# - name: gorm
|
||||
# out: gen/go
|
||||
# opt:
|
||||
# - paths=source_relative,enums=string,gateway=true
|
||||
- name: openapiv2
|
||||
out: gen/openapiv2
|
||||
40
cli.go
@@ -1,40 +0,0 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"tailscale.com/types/wgkey"
|
||||
)
|
||||
|
||||
// RegisterMachine is executed from the CLI to register a new Machine using its MachineKey
|
||||
func (h *Headscale) RegisterMachine(key string, namespace string) (*Machine, error) {
|
||||
ns, err := h.GetNamespace(namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mKey, err := wgkey.ParseHex(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := Machine{}
|
||||
if result := h.db.First(&m, "machine_key = ?", mKey.HexString()); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return nil, errors.New("Machine not found")
|
||||
}
|
||||
|
||||
if m.isAlreadyRegistered() {
|
||||
return nil, errors.New("Machine already registered")
|
||||
}
|
||||
|
||||
ip, err := h.getAvailableIP()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.IPAddress = ip.String()
|
||||
m.NamespaceID = ns.ID
|
||||
m.Registered = true
|
||||
m.RegisterMethod = "cli"
|
||||
h.db.Save(&m)
|
||||
return &m, nil
|
||||
}
|
||||
30
cli_test.go
@@ -1,31 +1,41 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
)
|
||||
|
||||
func (s *Suite) TestRegisterMachine(c *check.C) {
|
||||
n, err := h.CreateNamespace("test")
|
||||
namespace, err := app.CreateNamespace("test")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m := Machine{
|
||||
now := time.Now().UTC()
|
||||
|
||||
machine := Machine{
|
||||
ID: 0,
|
||||
MachineKey: "8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e",
|
||||
NodeKey: "bar",
|
||||
DiscoKey: "faa",
|
||||
Name: "testmachine",
|
||||
NamespaceID: n.ID,
|
||||
IPAddress: "10.0.0.1",
|
||||
NamespaceID: namespace.ID,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("10.0.0.1")},
|
||||
Expiry: &now,
|
||||
}
|
||||
h.db.Save(&m)
|
||||
|
||||
_, err = h.GetMachine("test", "testmachine")
|
||||
err = app.db.Save(&machine).Error
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
m2, err := h.RegisterMachine("8ce002a935f8c394e55e78fbbb410576575ff8ec5cfa2e627e4b807f1be15b0e", n.Name)
|
||||
_, err = app.GetMachine(namespace.Name, machine.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(m2.Registered, check.Equals, true)
|
||||
|
||||
_, err = m2.GetHostInfo()
|
||||
machineAfterRegistering, err := app.RegisterMachine(
|
||||
machine.MachineKey,
|
||||
namespace.Name,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
c.Assert(machineAfterRegistering.Registered, check.Equals, true)
|
||||
|
||||
_, err = machineAfterRegistering.GetHostInfo()
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
132
cmd/headscale/cli/debug.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
keyLength = 64
|
||||
errPreAuthKeyTooShort = Error("key too short, must be 64 hexadecimal characters")
|
||||
)
|
||||
|
||||
// Error is used to compare errors as per https://dave.cheney.net/2016/04/07/constant-errors
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(debugCmd)
|
||||
|
||||
createNodeCmd.Flags().StringP("name", "", "", "Name")
|
||||
err := createNodeCmd.MarkFlagRequired("name")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = createNodeCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = createNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
createNodeCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to advertise")
|
||||
|
||||
debugCmd.AddCommand(createNodeCmd)
|
||||
}
|
||||
|
||||
var debugCmd = &cobra.Command{
|
||||
Use: "debug",
|
||||
Short: "debug and testing commands",
|
||||
Long: "debug contains extra commands used for debugging and testing headscale",
|
||||
}
|
||||
|
||||
var createNodeCmd = &cobra.Command{
|
||||
Use: "create-node",
|
||||
Short: "Create a node (machine) that can be registered with `nodes register <>` command",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
name, err := cmd.Flags().GetString("name")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if len(machineKey) != keyLength {
|
||||
err = errPreAuthKeyTooShort
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
request := &v1.DebugCreateMachineRequest{
|
||||
Key: machineKey,
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
response, err := client.DebugCreateMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot create machine: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine created", output)
|
||||
},
|
||||
}
|
||||
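debug.go above declares its sentinel error as a constant via a string-based Error type (the constant-errors pattern it links to). Below is a small, self-contained sketch of that pattern under assumed names; it is illustrative only and not part of the repository.

// Sketch: constant sentinel errors using a string-backed error type.
package main

import (
	"errors"
	"fmt"
)

// Error is a string type whose values can be declared as untyped constants.
type Error string

func (e Error) Error() string { return string(e) }

const errKeyTooShort = Error("key too short, must be 64 hexadecimal characters") // assumed name

func validateKey(key string) error {
	if len(key) != 64 {
		return errKeyTooShort
	}

	return nil
}

func main() {
	err := validateKey("abc")
	// Constant errors compare cleanly with errors.Is.
	fmt.Println(errors.Is(err, errKeyTooShort)) // true
}
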
41
cmd/headscale/cli/generate.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(generateCmd)
|
||||
generateCmd.AddCommand(generatePrivateKeyCmd)
|
||||
}
|
||||
|
||||
var generateCmd = &cobra.Command{
|
||||
Use: "generate",
|
||||
Short: "Generate commands",
|
||||
}
|
||||
|
||||
var generatePrivateKeyCmd = &cobra.Command{
|
||||
Use: "private-key",
|
||||
Short: "Generate a private key for the headscale server",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
machineKey := key.NewMachine()
|
||||
|
||||
machineKeyStr, err := machineKey.MarshalText()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
}
|
||||
|
||||
SuccessOutput(map[string]string{
|
||||
"private_key": string(machineKeyStr),
|
||||
},
|
||||
string(machineKeyStr), output)
|
||||
},
|
||||
}
|
||||
@@ -2,10 +2,14 @@ package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -13,8 +17,13 @@ func init() {
|
||||
namespaceCmd.AddCommand(createNamespaceCmd)
|
||||
namespaceCmd.AddCommand(listNamespacesCmd)
|
||||
namespaceCmd.AddCommand(destroyNamespaceCmd)
|
||||
namespaceCmd.AddCommand(renameNamespaceCmd)
|
||||
}
|
||||
|
||||
const (
|
||||
errMissingParameter = headscale.Error("missing parameters")
|
||||
)
|
||||
|
||||
var namespaceCmd = &cobra.Command{
|
||||
Use: "namespaces",
|
||||
Short: "Manage the namespaces of Headscale",
|
||||
@@ -25,26 +34,40 @@ var createNamespaceCmd = &cobra.Command{
|
||||
Short: "Creates a new namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
h, err := getHeadscaleApp()
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
namespaceName := args[0]
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
log.Trace().Interface("client", client).Msg("Obtained gRPC client")
|
||||
|
||||
request := &v1.CreateNamespaceRequest{Name: namespaceName}
|
||||
|
||||
log.Trace().Interface("request", request).Msg("Sending CreateNamespace request")
|
||||
response, err := client.CreateNamespace(ctx, request)
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
namespace, err := h.CreateNamespace(args[0])
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(namespace, err, o)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot create namespace: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Error creating namespace: %s\n", err)
|
||||
return
|
||||
}
|
||||
fmt.Printf("Namespace created\n")
|
||||
|
||||
SuccessOutput(response.Namespace, "Namespace created", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -53,26 +76,70 @@ var destroyNamespaceCmd = &cobra.Command{
|
||||
Short: "Destroys a namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
namespaceName := args[0]
|
||||
|
||||
request := &v1.GetNamespaceRequest{
|
||||
Name: namespaceName,
|
||||
}
|
||||
err = h.DestroyNamespace(args[0])
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(map[string]string{"Result": "Namespace destroyed"}, err, o)
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
_, err := client.GetNamespace(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("Error destroying namespace: %s\n", err)
|
||||
return
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the namespace '%s' and any associated preauthkeys?",
|
||||
namespaceName,
|
||||
),
|
||||
}
|
||||
err := survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
request := &v1.DeleteNamespaceRequest{Name: namespaceName}
|
||||
|
||||
response, err := client.DeleteNamespace(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot destroy namespace: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
SuccessOutput(response, "Namespace destroyed", output)
|
||||
} else {
|
||||
SuccessOutput(map[string]string{"Result": "Namespace not destroyed"}, "Namespace not destroyed", output)
|
||||
}
|
||||
fmt.Printf("Namespace destroyed\n")
|
||||
},
|
||||
}
|
||||
|
||||
@@ -80,23 +147,92 @@ var listNamespacesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List all the namespaces",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
h, err := getHeadscaleApp()
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListNamespacesRequest{}
|
||||
|
||||
response, err := client.ListNamespaces(ctx, request)
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(namespaces, err, o)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get namespaces: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Namespaces, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Printf("ID\tName\n")
|
||||
for _, n := range *namespaces {
|
||||
fmt.Printf("%d\t%s\n", n.ID, n.Name)
|
||||
|
||||
tableData := pterm.TableData{{"ID", "Name", "Created"}}
|
||||
for _, namespace := range response.GetNamespaces() {
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
namespace.GetId(),
|
||||
namespace.GetName(),
|
||||
namespace.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
},
|
||||
)
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var renameNamespaceCmd = &cobra.Command{
|
||||
Use: "rename OLD_NAME NEW_NAME",
|
||||
Short: "Renames a namespace",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
expectedArguments := 2
|
||||
if len(args) < expectedArguments {
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.RenameNamespaceRequest{
|
||||
OldName: args[0],
|
||||
NewName: args[1],
|
||||
}
|
||||
|
||||
response, err := client.RenameNamespace(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot rename namespace: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Namespace, "Namespace renamed", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -8,19 +8,68 @@ import (
|
||||
"time"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(nodeCmd)
|
||||
nodeCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := nodeCmd.MarkPersistentFlagRequired("namespace")
|
||||
listNodesCmd.Flags().StringP("namespace", "n", "", "Filter by namespace")
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
|
||||
registerNodeCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err := registerNodeCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
registerNodeCmd.Flags().StringP("key", "k", "", "Key")
|
||||
err = registerNodeCmd.MarkFlagRequired("key")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(listNodesCmd)
|
||||
nodeCmd.AddCommand(registerNodeCmd)
|
||||
|
||||
expireNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = expireNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(expireNodeCmd)
|
||||
|
||||
deleteNodeCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = deleteNodeCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(deleteNodeCmd)
|
||||
|
||||
shareMachineCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = shareMachineCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
shareMachineCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = shareMachineCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(shareMachineCmd)
|
||||
|
||||
unshareMachineCmd.Flags().StringP("namespace", "n", "", "Namespace")
|
||||
err = unshareMachineCmd.MarkFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
unshareMachineCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = unshareMachineCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
nodeCmd.AddCommand(unshareMachineCmd)
|
||||
}
|
||||
|
||||
var nodeCmd = &cobra.Command{
|
||||
@@ -29,118 +78,460 @@ var nodeCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var registerNodeCmd = &cobra.Command{
|
||||
Use: "register machineID",
|
||||
Use: "register",
|
||||
Short: "Registers a machine to your network",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
m, err := h.RegisterMachine(args[0], n)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(m, err, o)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
machineKey, err := cmd.Flags().GetString("key")
|
||||
if err != nil {
|
||||
fmt.Printf("Cannot register machine: %s\n", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine key from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Printf("Machine registered\n")
|
||||
|
||||
request := &v1.RegisterMachineRequest{
|
||||
Key: machineKey,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
response, err := client.RegisterMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine register", output)
|
||||
},
|
||||
}
|
||||
|
||||
var listNodesCmd = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the nodes in a given namespace",
|
||||
Short: "List nodes",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
machines, err := h.ListMachinesInNamespace(n)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(machines, err, o)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListMachinesRequest{
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
response, err := client.ListMachines(ctx, request)
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting nodes: %s", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("ID\tname\t\tlast seen\t\tephemeral\n")
|
||||
for _, m := range *machines {
|
||||
var ephemeral bool
|
||||
if m.AuthKey != nil && m.AuthKey.Ephemeral {
|
||||
ephemeral = true
|
||||
}
|
||||
var lastSeen time.Time
|
||||
if m.LastSeen != nil {
|
||||
lastSeen = *m.LastSeen
|
||||
}
|
||||
fmt.Printf("%d\t%s\t%s\t%t\n", m.ID, m.Name, lastSeen.Format("2006-01-02 15:04:05"), ephemeral)
|
||||
if output != "" {
|
||||
SuccessOutput(response.Machines, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData, err := nodesToPtables(namespace, response.Machines)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var expireNodeCmd = &cobra.Command{
|
||||
Use: "expire",
|
||||
Short: "Expire (log out) a machine in your network",
|
||||
Long: "Expiring a node will keep the node in the database and force it to reauthenticate.",
|
||||
Aliases: []string{"logout"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpireMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
response, err := client.ExpireMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot expire machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Machine expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
var deleteNodeCmd = &cobra.Command{
|
||||
Use: "delete ID",
|
||||
Use: "delete",
|
||||
Short: "Delete a node",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
id, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
log.Fatalf("Error converting ID to integer: %s", err)
|
||||
}
|
||||
m, err := h.GetMachineByID(uint64(id))
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting node: %s", err)
|
||||
}
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
confirm := false
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf("Do you want to remove the node %s?", m.Name),
|
||||
}
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error converting ID to integer: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if confirm {
|
||||
err = h.DeleteMachine(m)
|
||||
if err != nil {
|
||||
log.Fatalf("Error deleting node: %s", err)
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
getRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
getResponse, err := client.GetMachine(ctx, getRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error getting node node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
deleteRequest := &v1.DeleteMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
confirm := false
|
||||
force, _ := cmd.Flags().GetBool("force")
|
||||
if !force {
|
||||
prompt := &survey.Confirm{
|
||||
Message: fmt.Sprintf(
|
||||
"Do you want to remove the node %s?",
|
||||
getResponse.GetMachine().Name,
|
||||
),
|
||||
}
|
||||
fmt.Printf("Node deleted\n")
|
||||
err = survey.AskOne(prompt, &confirm)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if confirm || force {
|
||||
response, err := client.DeleteMachine(ctx, deleteRequest)
|
||||
if output != "" {
|
||||
SuccessOutput(response, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Error deleting node: %s",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
SuccessOutput(
|
||||
map[string]string{"Result": "Node deleted"},
|
||||
"Node deleted",
|
||||
output,
|
||||
)
|
||||
} else {
|
||||
fmt.Printf("Node not deleted\n")
|
||||
SuccessOutput(map[string]string{"Result": "Node not deleted"}, "Node not deleted", output)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func sharingWorker(
|
||||
cmd *cobra.Command,
|
||||
) (string, *v1.Machine, *v1.Namespace, error) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespaceStr, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
identifier, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting ID to integer: %s", err), output)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
machineRequest := &v1.GetMachineRequest{
|
||||
MachineId: identifier,
|
||||
}
|
||||
|
||||
machineResponse, err := client.GetMachine(ctx, machineRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
namespaceRequest := &v1.GetNamespaceRequest{
|
||||
Name: namespaceStr,
|
||||
}
|
||||
|
||||
namespaceResponse, err := client.GetNamespace(ctx, namespaceRequest)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting node node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return "", nil, nil, err
|
||||
}
|
||||
|
||||
return output, machineResponse.GetMachine(), namespaceResponse.GetNamespace(), nil
|
||||
}
|
||||
|
||||
var shareMachineCmd = &cobra.Command{
|
||||
Use: "share",
|
||||
Short: "Shares a node from the current namespace to the specified one",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, machine, namespace, err := sharingWorker(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to fetch namespace or machine: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ShareMachineRequest{
|
||||
MachineId: machine.Id,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
|
||||
response, err := client.ShareMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error sharing node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Node shared", output)
|
||||
},
|
||||
}
|
||||
|
||||
var unshareMachineCmd = &cobra.Command{
|
||||
Use: "unshare",
|
||||
Short: "Unshares a node from the specified namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, machine, namespace, err := sharingWorker(cmd)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to fetch namespace or machine: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.UnshareMachineRequest{
|
||||
MachineId: machine.Id,
|
||||
Namespace: namespace.Name,
|
||||
}
|
||||
|
||||
response, err := client.UnshareMachine(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error unsharing node: %s", status.Convert(err).Message()),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.Machine, "Node unshared", output)
|
||||
},
|
||||
}
|
||||
|
||||
func nodesToPtables(
|
||||
currentNamespace string,
|
||||
machines []*v1.Machine,
|
||||
) (pterm.TableData, error) {
|
||||
tableData := pterm.TableData{
|
||||
{
|
||||
"ID",
|
||||
"Name",
|
||||
"NodeKey",
|
||||
"Namespace",
|
||||
"IP addresses",
|
||||
"Ephemeral",
|
||||
"Last seen",
|
||||
"Online",
|
||||
"Expired",
|
||||
},
|
||||
}
|
||||
|
||||
for _, machine := range machines {
|
||||
var ephemeral bool
|
||||
if machine.PreAuthKey != nil && machine.PreAuthKey.Ephemeral {
|
||||
ephemeral = true
|
||||
}
|
||||
|
||||
var lastSeen time.Time
|
||||
var lastSeenTime string
|
||||
if machine.LastSeen != nil {
|
||||
lastSeen = machine.LastSeen.AsTime()
|
||||
lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var expiry time.Time
|
||||
if machine.Expiry != nil {
|
||||
expiry = machine.Expiry.AsTime()
|
||||
}
|
||||
|
||||
var nodeKey key.NodePublic
|
||||
err := nodeKey.UnmarshalText(
|
||||
[]byte(headscale.NodePublicKeyEnsurePrefix(machine.NodeKey)),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var online string
|
||||
if lastSeen.After(
|
||||
time.Now().Add(-5 * time.Minute),
|
||||
) { // TODO: Find a better way to reliably show if online
|
||||
online = pterm.LightGreen("online")
|
||||
} else {
|
||||
online = pterm.LightRed("offline")
|
||||
}
|
||||
|
||||
var expired string
|
||||
if expiry.IsZero() || expiry.After(time.Now()) {
|
||||
expired = pterm.LightGreen("no")
|
||||
} else {
|
||||
expired = pterm.LightRed("yes")
|
||||
}
|
||||
|
||||
var namespace string
|
||||
if currentNamespace == "" || (currentNamespace == machine.Namespace.Name) {
|
||||
namespace = pterm.LightMagenta(machine.Namespace.Name)
|
||||
} else {
|
||||
// Shared into this namespace
|
||||
namespace = pterm.LightYellow(machine.Namespace.Name)
|
||||
}
|
||||
tableData = append(
|
||||
tableData,
|
||||
[]string{
|
||||
strconv.FormatUint(machine.Id, headscale.Base10),
|
||||
machine.Name,
|
||||
nodeKey.ShortString(),
|
||||
namespace,
|
||||
strings.Join(machine.IpAddresses, ", "),
|
||||
strconv.FormatBool(ephemeral),
|
||||
lastSeenTime,
|
||||
online,
|
||||
expired,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
return tableData, nil
|
||||
}
|
||||
|
||||
@@ -2,12 +2,18 @@ package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hako/durafmt"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultPreAuthKeyExpiry = 1 * time.Hour
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -15,13 +21,17 @@ func init() {
|
||||
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := preauthkeysCmd.MarkPersistentFlagRequired("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
log.Fatal().Err(err).Msg("")
|
||||
}
|
||||
preauthkeysCmd.AddCommand(listPreAuthKeys)
|
||||
preauthkeysCmd.AddCommand(createPreAuthKeyCmd)
|
||||
createPreAuthKeyCmd.PersistentFlags().Bool("reusable", false, "Make the preauthkey reusable")
|
||||
createPreAuthKeyCmd.PersistentFlags().Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().StringP("expiration", "e", "", "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
preauthkeysCmd.AddCommand(expirePreAuthKeyCmd)
|
||||
createPreAuthKeyCmd.PersistentFlags().
|
||||
Bool("reusable", false, "Make the preauthkey reusable")
|
||||
createPreAuthKeyCmd.PersistentFlags().
|
||||
Bool("ephemeral", false, "Preauthkey for ephemeral nodes")
|
||||
createPreAuthKeyCmd.Flags().
|
||||
DurationP("expiration", "e", DefaultPreAuthKeyExpiry, "Human-readable expiration of the key (30m, 24h, 365d...)")
|
||||
}
|
||||
|
||||
var preauthkeysCmd = &cobra.Command{
|
||||
@@ -33,48 +43,76 @@ var listPreAuthKeys = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "List the preauthkeys for this namespace",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
keys, err := h.GetPreAuthKeys(n)
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(keys, err, o)
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ListPreAuthKeysRequest{
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
response, err := client.ListPreAuthKeys(ctx, request)
|
||||
if err != nil {
|
||||
fmt.Printf("Error getting the list of keys: %s\n", err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting the list of keys: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
for _, k := range *keys {
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.PreAuthKeys, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData := pterm.TableData{
|
||||
{"ID", "Key", "Reusable", "Ephemeral", "Used", "Expiration", "Created"},
|
||||
}
|
||||
for _, key := range response.PreAuthKeys {
|
||||
expiration := "-"
|
||||
if k.Expiration != nil {
|
||||
expiration = k.Expiration.Format("2006-01-02 15:04:05")
|
||||
if key.GetExpiration() != nil {
|
||||
expiration = key.Expiration.AsTime().Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var reusable string
|
||||
if k.Ephemeral {
|
||||
if key.GetEphemeral() {
|
||||
reusable = "N/A"
|
||||
} else {
|
||||
reusable = fmt.Sprintf("%v", k.Reusable)
|
||||
reusable = fmt.Sprintf("%v", key.GetReusable())
|
||||
}
|
||||
|
||||
fmt.Printf(
|
||||
"key: %s, namespace: %s, reusable: %s, ephemeral: %v, expiration: %s, created_at: %s\n",
|
||||
k.Key,
|
||||
k.Namespace.Name,
|
||||
tableData = append(tableData, []string{
|
||||
key.GetId(),
|
||||
key.GetKey(),
|
||||
reusable,
|
||||
k.Ephemeral,
|
||||
strconv.FormatBool(key.GetEphemeral()),
|
||||
strconv.FormatBool(key.GetUsed()),
|
||||
expiration,
|
||||
k.CreatedAt.Format("2006-01-02 15:04:05"),
|
||||
key.GetCreatedAt().AsTime().Format("2006-01-02 15:04:05"),
|
||||
})
|
||||
|
||||
}
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -83,39 +121,95 @@ var createPreAuthKeyCmd = &cobra.Command{
    Use:   "create",
    Short: "Creates a new preauthkey in the specified namespace",
    Run: func(cmd *cobra.Command, args []string) {
-       n, err := cmd.Flags().GetString("namespace")
-       if err != nil {
-           log.Fatalf("Error getting namespace: %s", err)
-       }
-       o, _ := cmd.Flags().GetString("output")
+       output, _ := cmd.Flags().GetString("output")

-       h, err := getHeadscaleApp()
+       namespace, err := cmd.Flags().GetString("namespace")
        if err != nil {
-           log.Fatalf("Error initializing: %s", err)
+           ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)

+           return
        }

        reusable, _ := cmd.Flags().GetBool("reusable")
        ephemeral, _ := cmd.Flags().GetBool("ephemeral")

-       e, _ := cmd.Flags().GetString("expiration")
-       var expiration *time.Time
-       if e != "" {
-           duration, err := durafmt.ParseStringShort(e)
-           if err != nil {
-               log.Fatalf("Error parsing expiration: %s", err)
-           }
-           exp := time.Now().UTC().Add(duration.Duration())
-           expiration = &exp
+       log.Trace().
+           Bool("reusable", reusable).
+           Bool("ephemeral", ephemeral).
+           Str("namespace", namespace).
+           Msg("Preparing to create preauthkey")

+       request := &v1.CreatePreAuthKeyRequest{
+           Namespace: namespace,
+           Reusable:  reusable,
+           Ephemeral: ephemeral,
+       }

-       k, err := h.CreatePreAuthKey(n, reusable, ephemeral, expiration)
-       if strings.HasPrefix(o, "json") {
-           JsonOutput(k, err, o)
-           return
-       }
+       duration, _ := cmd.Flags().GetDuration("expiration")
+       expiration := time.Now().UTC().Add(duration)

+       log.Trace().Dur("expiration", duration).Msg("expiration has been set")

+       request.Expiration = timestamppb.New(expiration)

+       ctx, client, conn, cancel := getHeadscaleCLIClient()
+       defer cancel()
+       defer conn.Close()

+       response, err := client.CreatePreAuthKey(ctx, request)
+       if err != nil {
            fmt.Println(err)
+           ErrorOutput(
+               err,
+               fmt.Sprintf("Cannot create Pre Auth Key: %s\n", err),
+               output,
+           )

+           return
+       }
-       fmt.Printf("Key: %s\n", k.Key)

+       SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
    },
}

var expirePreAuthKeyCmd = &cobra.Command{
|
||||
Use: "expire KEY",
|
||||
Short: "Expire a preauthkey",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 {
|
||||
return errMissingParameter
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
namespace, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error getting namespace: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.ExpirePreAuthKeyRequest{
|
||||
Namespace: namespace,
|
||||
Key: args[0],
|
||||
}
|
||||
|
||||
response, err := client.ExpirePreAuthKey(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Cannot expire Pre Auth Key: %s\n", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response, "Key expired", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -8,7 +8,10 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringP("output", "o", "", "Output format. Empty for human-readable, 'json' or 'json-line'")
|
||||
rootCmd.PersistentFlags().
|
||||
StringP("output", "o", "", "Output format. Empty for human-readable, 'json', 'json-line' or 'yaml'")
|
||||
rootCmd.PersistentFlags().
|
||||
Bool("force", false, "Disable prompts and forces the execution")
|
||||
}
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
|
||||
@@ -3,20 +3,35 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"strconv"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/spf13/cobra"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(routesCmd)
|
||||
routesCmd.PersistentFlags().StringP("namespace", "n", "", "Namespace")
|
||||
err := routesCmd.MarkPersistentFlagRequired("namespace")
|
||||
|
||||
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err := listRoutesCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
routesCmd.AddCommand(listRoutesCmd)
|
||||
|
||||
enableRouteCmd.Flags().
|
||||
StringSliceP("route", "r", []string{}, "List (or repeated flags) of routes to enable")
|
||||
enableRouteCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
|
||||
err = enableRouteCmd.MarkFlagRequired("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
routesCmd.AddCommand(enableRouteCmd)
|
||||
|
||||
nodeCmd.AddCommand(routesCmd)
|
||||
}
|
||||
|
||||
var routesCmd = &cobra.Command{
|
||||
@@ -25,71 +40,168 @@ var routesCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
var listRoutesCmd = &cobra.Command{
-   Use:   "list NODE",
-   Short: "List the routes exposed by this node",
-   Args: func(cmd *cobra.Command, args []string) error {
-       if len(args) < 1 {
-           return fmt.Errorf("Missing parameters")
-       }
-       return nil
-   },
+   Use:   "list",
+   Short: "List routes advertised and enabled by a given node",
    Run: func(cmd *cobra.Command, args []string) {
-       n, err := cmd.Flags().GetString("namespace")
-       if err != nil {
-           log.Fatalf("Error getting namespace: %s", err)
-       }
-       o, _ := cmd.Flags().GetString("output")
+       output, _ := cmd.Flags().GetString("output")

-       h, err := getHeadscaleApp()
+       machineID, err := cmd.Flags().GetUint64("identifier")
        if err != nil {
-           log.Fatalf("Error initializing: %s", err)
-       }
-       routes, err := h.GetNodeRoutes(n, args[0])
+           ErrorOutput(
+               err,
+               fmt.Sprintf("Error getting machine id from flag: %s", err),
+               output,
+           )

-       if strings.HasPrefix(o, "json") {
-           JsonOutput(routes, err, o)
            return
        }

+       ctx, client, conn, cancel := getHeadscaleCLIClient()
+       defer cancel()
+       defer conn.Close()

+       request := &v1.GetMachineRouteRequest{
+           MachineId: machineID,
+       }

+       response, err := client.GetMachineRoute(ctx, request)
+       if err != nil {
            fmt.Println(err)
+           ErrorOutput(
+               err,
+               fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
+               output,
+           )

+           return
+       }

-       fmt.Println(routes)
+       if output != "" {
+           SuccessOutput(response.Routes, "", output)

+           return
+       }

+       tableData := routesToPtables(response.Routes)
+       if err != nil {
+           ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)

+           return
+       }

+       err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
+       if err != nil {
+           ErrorOutput(
+               err,
+               fmt.Sprintf("Failed to render pterm table: %s", err),
+               output,
+           )

+           return
+       }
    },
}

var enableRouteCmd = &cobra.Command{
|
||||
Use: "enable node-name route",
|
||||
Short: "Allows exposing a route declared by this node to the rest of the nodes",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 2 {
|
||||
return fmt.Errorf("Missing parameters")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Use: "enable",
|
||||
Short: "Set the enabled routes for a given node",
|
||||
Long: `This command will take a list of routes that will _replace_
|
||||
the current set of routes on a given node.
|
||||
If you would like to disable a route, simply run the command again, but
|
||||
omit the route you do not want to enable.
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
n, err := cmd.Flags().GetString("namespace")
|
||||
if err != nil {
|
||||
log.Fatalf("Error getting namespace: %s", err)
|
||||
}
|
||||
o, _ := cmd.Flags().GetString("output")
|
||||
output, _ := cmd.Flags().GetString("output")
|
||||
|
||||
h, err := getHeadscaleApp()
|
||||
machineID, err := cmd.Flags().GetUint64("identifier")
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
route, err := h.EnableNodeRoute(n, args[0], args[1])
|
||||
if strings.HasPrefix(o, "json") {
|
||||
JsonOutput(route, err, o)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting machine id from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
routes, err := cmd.Flags().GetStringSlice("route")
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Error getting routes from flag: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
ctx, client, conn, cancel := getHeadscaleCLIClient()
|
||||
defer cancel()
|
||||
defer conn.Close()
|
||||
|
||||
request := &v1.EnableMachineRoutesRequest{
|
||||
MachineId: machineID,
|
||||
Routes: routes,
|
||||
}
|
||||
|
||||
response, err := client.EnableMachineRoutes(ctx, request)
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf(
|
||||
"Cannot register machine: %s\n",
|
||||
status.Convert(err).Message(),
|
||||
),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Routes, "", output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
tableData := routesToPtables(response.Routes)
|
||||
if err != nil {
|
||||
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
|
||||
if err != nil {
|
||||
ErrorOutput(
|
||||
err,
|
||||
fmt.Sprintf("Failed to render pterm table: %s", err),
|
||||
output,
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
fmt.Printf("Enabled route %s\n", route)
|
||||
},
|
||||
}
|
||||
|
||||
// routesToPtables converts the list of routes to a nice table.
|
||||
func routesToPtables(routes *v1.Routes) pterm.TableData {
|
||||
tableData := pterm.TableData{{"Route", "Enabled"}}
|
||||
|
||||
for _, route := range routes.GetAdvertisedRoutes() {
|
||||
enabled := isStringInSlice(routes.EnabledRoutes, route)
|
||||
|
||||
tableData = append(tableData, []string{route, strconv.FormatBool(enabled)})
|
||||
}
|
||||
|
||||
return tableData
|
||||
}
|
||||
|
||||
func isStringInSlice(strs []string, s string) bool {
|
||||
for _, s2 := range strs {
|
||||
if s == s2 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
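For reference, a short sketch of how routesToPtables above is meant to be consumed alongside the gRPC response handling (assuming v1.Routes simply carries AdvertisedRoutes and EnabledRoutes as string slices, as the accessors suggest; the route values are invented):

// Hypothetical illustration only: a node advertising two routes with one of
// them enabled yields the rows {"10.0.0.0/24", "true"} and {"192.168.1.0/24", "false"}.
routes := &v1.Routes{
    AdvertisedRoutes: []string{"10.0.0.0/24", "192.168.1.0/24"},
    EnabledRoutes:    []string{"10.0.0.0/24"},
}
tableData := routesToPtables(routes)
_ = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()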
|
||||
|
||||
@@ -21,7 +21,7 @@ var serveCmd = &cobra.Command{
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
}
|
||||
go h.ExpireEphemeralNodes(5000)
|
||||
|
||||
err = h.Serve()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
|
||||
@@ -1,26 +1,33 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"google.golang.org/grpc"
|
||||
"gopkg.in/yaml.v2"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
-type ErrorOutput struct {
-   Error string
-}
+const (
+   PermissionFallback = 0o700
+)
|
||||
|
||||
func LoadConfig(path string) error {
|
||||
viper.SetConfigName("config")
|
||||
@@ -32,46 +39,176 @@ func LoadConfig(path string) error {
|
||||
// For testing
|
||||
viper.AddConfigPath(path)
|
||||
}
|
||||
|
||||
viper.SetEnvPrefix("headscale")
|
||||
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
|
||||
viper.AutomaticEnv()
|
||||
|
||||
viper.SetDefault("tls_letsencrypt_cache_dir", "/var/www/.cache")
|
||||
viper.SetDefault("tls_letsencrypt_challenge_type", "HTTP-01")
|
||||
|
||||
viper.SetDefault("ip_prefix", "100.64.0.0/10")
|
||||
viper.SetDefault("log_level", "info")
|
||||
|
||||
viper.SetDefault("log_level", "debug")
|
||||
viper.SetDefault("dns_config", nil)
|
||||
|
||||
err := viper.ReadInConfig()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Fatal error reading config file: %s \n", err)
|
||||
viper.SetDefault("unix_socket", "/var/run/headscale.sock")
|
||||
viper.SetDefault("unix_socket_permission", "0o770")
|
||||
|
||||
viper.SetDefault("cli.insecure", false)
|
||||
viper.SetDefault("cli.timeout", "5s")
|
||||
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return fmt.Errorf("fatal error reading config file: %w", err)
|
||||
}
|
||||
|
||||
// Collect any validation errors and return them all at once
|
||||
var errorText string
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") && ((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
((viper.GetString("tls_cert_path") != "") || (viper.GetString("tls_key_path") != "")) {
|
||||
errorText += "Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both\n"
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") && (viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") && (!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
if (viper.GetString("tls_letsencrypt_hostname") != "") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") == "TLS-ALPN-01") &&
|
||||
(!strings.HasSuffix(viper.GetString("listen_addr"), ":443")) {
|
||||
// this is only a warning because there could be something sitting in front of headscale that redirects the traffic (e.g. an iptables rule)
|
||||
log.Warn().
|
||||
Msg("Warning: when using tls_letsencrypt_hostname with TLS-ALPN-01 as challenge type, headscale must be reachable on port 443, i.e. listen_addr should probably end in :443")
|
||||
}
|
||||
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") && (viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
|
||||
if (viper.GetString("tls_letsencrypt_challenge_type") != "HTTP-01") &&
|
||||
(viper.GetString("tls_letsencrypt_challenge_type") != "TLS-ALPN-01") {
|
||||
errorText += "Fatal config error: the only supported values for tls_letsencrypt_challenge_type are HTTP-01 and TLS-ALPN-01\n"
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") && !strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
if !strings.HasPrefix(viper.GetString("server_url"), "http://") &&
|
||||
!strings.HasPrefix(viper.GetString("server_url"), "https://") {
|
||||
errorText += "Fatal config error: server_url must start with https:// or http://\n"
|
||||
}
|
||||
if errorText != "" {
|
||||
//nolint
|
||||
return errors.New(strings.TrimSuffix(errorText, "\n"))
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func GetDERPConfig() headscale.DERPConfig {
|
||||
urlStrs := viper.GetStringSlice("derp.urls")
|
||||
|
||||
urls := make([]url.URL, len(urlStrs))
|
||||
for index, urlStr := range urlStrs {
|
||||
urlAddr, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("url", urlStr).
|
||||
Err(err).
|
||||
Msg("Failed to parse url, ignoring...")
|
||||
}
|
||||
|
||||
urls[index] = *urlAddr
|
||||
}
|
||||
|
||||
paths := viper.GetStringSlice("derp.paths")
|
||||
|
||||
autoUpdate := viper.GetBool("derp.auto_update_enabled")
|
||||
updateFrequency := viper.GetDuration("derp.update_frequency")
|
||||
|
||||
return headscale.DERPConfig{
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
func GetDNSConfig() (*tailcfg.DNSConfig, string) {
|
||||
if viper.IsSet("dns_config") {
|
||||
dnsConfig := &tailcfg.DNSConfig{}
|
||||
|
||||
if viper.IsSet("dns_config.nameservers") {
|
||||
nameserversStr := viper.GetStringSlice("dns_config.nameservers")
|
||||
|
||||
nameservers := make([]netaddr.IP, len(nameserversStr))
|
||||
resolvers := make([]dnstype.Resolver, len(nameserversStr))
|
||||
|
||||
for index, nameserverStr := range nameserversStr {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
|
||||
nameservers[index] = nameserver
|
||||
resolvers[index] = dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
|
||||
dnsConfig.Nameservers = nameservers
|
||||
dnsConfig.Resolvers = resolvers
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.restricted_nameservers") {
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Routes = make(map[string][]dnstype.Resolver)
|
||||
restrictedDNS := viper.GetStringMapStringSlice(
|
||||
"dns_config.restricted_nameservers",
|
||||
)
|
||||
for domain, restrictedNameservers := range restrictedDNS {
|
||||
restrictedResolvers := make(
|
||||
[]dnstype.Resolver,
|
||||
len(restrictedNameservers),
|
||||
)
|
||||
for index, nameserverStr := range restrictedNameservers {
|
||||
nameserver, err := netaddr.ParseIP(nameserverStr)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "getDNSConfig").
|
||||
Err(err).
|
||||
Msgf("Could not parse restricted nameserver IP: %s", nameserverStr)
|
||||
}
|
||||
restrictedResolvers[index] = dnstype.Resolver{
|
||||
Addr: nameserver.String(),
|
||||
}
|
||||
}
|
||||
dnsConfig.Routes[domain] = restrictedResolvers
|
||||
}
|
||||
} else {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.restricted_nameservers is set, but no nameservers are configured. Ignoring restricted_nameservers.")
|
||||
}
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.domains") {
|
||||
dnsConfig.Domains = viper.GetStringSlice("dns_config.domains")
|
||||
}
|
||||
|
||||
if viper.IsSet("dns_config.magic_dns") {
|
||||
magicDNS := viper.GetBool("dns_config.magic_dns")
|
||||
if len(dnsConfig.Nameservers) > 0 {
|
||||
dnsConfig.Proxied = magicDNS
|
||||
} else if magicDNS {
|
||||
log.Warn().
|
||||
Msg("Warning: dns_config.magic_dns is set, but no nameservers are configured. Ignoring magic_dns.")
|
||||
}
|
||||
}
|
||||
|
||||
var baseDomain string
|
||||
if viper.IsSet("dns_config.base_domain") {
|
||||
baseDomain = viper.GetString("dns_config.base_domain")
|
||||
} else {
|
||||
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
|
||||
}
|
||||
|
||||
return dnsConfig, baseDomain
|
||||
}
|
||||
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
func absPath(path string) string {
|
||||
// If a relative path is provided, prefix it with the directory where
|
||||
// the config file was found.
|
||||
@@ -81,35 +218,73 @@ func absPath(path string) string {
|
||||
path = filepath.Join(dir, path)
|
||||
}
|
||||
}
|
||||
|
||||
return path
|
||||
}
|
||||
|
||||
-func getHeadscaleApp() (*headscale.Headscale, error) {
-   derpPath := absPath(viper.GetString("derp_map_path"))
-   derpMap, err := loadDerpMap(derpPath)
-   if err != nil {
-       log.Error().
-           Str("path", derpPath).
-           Err(err).
-           Msg("Could not load DERP servers map file")
+func getHeadscaleConfig() headscale.Config {
|
||||
dnsConfig, baseDomain := GetDNSConfig()
|
||||
derpConfig := GetDERPConfig()
|
||||
|
||||
configuredPrefixes := viper.GetStringSlice("ip_prefixes")
|
||||
parsedPrefixes := make([]netaddr.IPPrefix, 0, len(configuredPrefixes)+1)
|
||||
|
||||
legacyPrefixField := viper.GetString("ip_prefix")
|
||||
if len(legacyPrefixField) > 0 {
|
||||
log.
|
||||
Warn().
|
||||
Msgf(
|
||||
"%s, %s",
|
||||
"use of 'ip_prefix' for configuration is deprecated",
|
||||
"please see 'ip_prefixes' in the shipped example.",
|
||||
)
|
||||
legacyPrefix, err := netaddr.ParseIPPrefix(legacyPrefixField)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefix: %w", err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, legacyPrefix)
|
||||
}
|
||||
|
||||
-   // Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
-   // to avoid races
-   minInactivityTimeout, _ := time.ParseDuration("65s")
-   if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
-       err = fmt.Errorf("ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s\n", viper.GetString("ephemeral_node_inactivity_timeout"), minInactivityTimeout)
-       return nil, err
|
||||
for i, prefixInConfig := range configuredPrefixes {
|
||||
prefix, err := netaddr.ParseIPPrefix(prefixInConfig)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to parse ip_prefixes[%d]: %w", i, err))
|
||||
}
|
||||
parsedPrefixes = append(parsedPrefixes, prefix)
|
||||
}
|
||||
|
||||
cfg := headscale.Config{
|
||||
prefixes := make([]netaddr.IPPrefix, 0, len(parsedPrefixes))
|
||||
{
|
||||
// dedup
|
||||
normalizedPrefixes := make(map[string]int, len(parsedPrefixes))
|
||||
for i, p := range parsedPrefixes {
|
||||
normalized, _ := p.Range().Prefix()
|
||||
normalizedPrefixes[normalized.String()] = i
|
||||
}
|
||||
|
||||
// convert back to list
|
||||
for _, i := range normalizedPrefixes {
|
||||
prefixes = append(prefixes, parsedPrefixes[i])
|
||||
}
|
||||
}
|
||||
|
||||
if len(prefixes) < 1 {
|
||||
prefixes = append(prefixes, netaddr.MustParseIPPrefix("100.64.0.0/10"))
|
||||
log.Warn().Msgf("'ip_prefixes' not configured, falling back to default: %v", prefixes)
|
||||
}
|
||||
|
||||
return headscale.Config{
|
||||
ServerURL: viper.GetString("server_url"),
|
||||
Addr: viper.GetString("listen_addr"),
|
||||
IPPrefixes: prefixes,
|
||||
PrivateKeyPath: absPath(viper.GetString("private_key_path")),
|
||||
-       DerpMap:        derpMap,
-       IPPrefix:       netaddr.MustParseIPPrefix(viper.GetString("ip_prefix")),
|
||||
BaseDomain: baseDomain,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration("ephemeral_node_inactivity_timeout"),
|
||||
DERP: derpConfig,
|
||||
|
||||
EphemeralNodeInactivityTimeout: viper.GetDuration(
|
||||
"ephemeral_node_inactivity_timeout",
|
||||
),
|
||||
|
||||
DBtype: viper.GetString("db_type"),
|
||||
DBpath: absPath(viper.GetString("db_path")),
|
||||
@@ -119,16 +294,60 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
DBuser: viper.GetString("db_user"),
|
||||
DBpass: viper.GetString("db_pass"),
|
||||
|
||||
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
|
||||
TLSLetsEncryptCacheDir: absPath(viper.GetString("tls_letsencrypt_cache_dir")),
|
||||
TLSLetsEncryptHostname: viper.GetString("tls_letsencrypt_hostname"),
|
||||
TLSLetsEncryptListen: viper.GetString("tls_letsencrypt_listen"),
|
||||
TLSLetsEncryptCacheDir: absPath(
|
||||
viper.GetString("tls_letsencrypt_cache_dir"),
|
||||
),
|
||||
TLSLetsEncryptChallengeType: viper.GetString("tls_letsencrypt_challenge_type"),
|
||||
|
||||
TLSCertPath: absPath(viper.GetString("tls_cert_path")),
|
||||
TLSKeyPath: absPath(viper.GetString("tls_key_path")),
|
||||
|
||||
DNSConfig: dnsConfig,
|
||||
|
||||
ACMEEmail: viper.GetString("acme_email"),
|
||||
ACMEURL: viper.GetString("acme_url"),
|
||||
|
||||
UnixSocket: viper.GetString("unix_socket"),
|
||||
UnixSocketPermission: GetFileMode("unix_socket_permission"),
|
||||
|
||||
OIDC: headscale.OIDCConfig{
|
||||
Issuer: viper.GetString("oidc.issuer"),
|
||||
ClientID: viper.GetString("oidc.client_id"),
|
||||
ClientSecret: viper.GetString("oidc.client_secret"),
|
||||
},
|
||||
|
||||
CLI: headscale.CLIConfig{
|
||||
Address: viper.GetString("cli.address"),
|
||||
APIKey: viper.GetString("cli.api_key"),
|
||||
Insecure: viper.GetBool("cli.insecure"),
|
||||
Timeout: viper.GetDuration("cli.timeout"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
// Minimum inactivity time out is keepalive timeout (60s) plus a few seconds
|
||||
// to avoid races
|
||||
minInactivityTimeout, _ := time.ParseDuration("65s")
|
||||
if viper.GetDuration("ephemeral_node_inactivity_timeout") <= minInactivityTimeout {
|
||||
// TODO: Find a better way to return this text
|
||||
//nolint
|
||||
err := fmt.Errorf(
|
||||
"ephemeral_node_inactivity_timeout (%s) is set too low, must be more than %s",
|
||||
viper.GetString("ephemeral_node_inactivity_timeout"),
|
||||
minInactivityTimeout,
|
||||
)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
-   h, err := headscale.NewHeadscale(cfg)
+   cfg := getHeadscaleConfig()
|
||||
|
||||
cfg.OIDC.MatchMap = loadOIDCMatchMap()
|
||||
|
||||
app, err := headscale.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -137,7 +356,7 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
|
||||
if viper.GetString("acl_policy_path") != "" {
|
||||
aclPath := absPath(viper.GetString("acl_policy_path"))
|
||||
err = h.LoadACLPolicy(aclPath)
|
||||
err = app.LoadACLPolicy(aclPath)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("path", aclPath).
|
||||
@@ -146,52 +365,150 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return h, nil
|
||||
return app, nil
|
||||
}
|
||||
|
||||
-func loadDerpMap(path string) (*tailcfg.DERPMap, error) {
-   derpFile, err := os.Open(path)
-   if err != nil {
-       return nil, err
+func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
|
||||
cfg := getHeadscaleConfig()
|
||||
|
||||
log.Debug().
|
||||
Dur("timeout", cfg.CLI.Timeout).
|
||||
Msgf("Setting timeout")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), cfg.CLI.Timeout)
|
||||
|
||||
grpcOptions := []grpc.DialOption{
|
||||
grpc.WithBlock(),
|
||||
}
|
||||
-   defer derpFile.Close()
-   var derpMap tailcfg.DERPMap
-   b, err := io.ReadAll(derpFile)
-   if err != nil {
-       return nil, err
|
||||
|
||||
address := cfg.CLI.Address
|
||||
|
||||
// If the address is not set, we assume that we are on the server hosting headscale.
|
||||
if address == "" {
|
||||
log.Debug().
|
||||
Str("socket", cfg.UnixSocket).
|
||||
Msgf("HEADSCALE_CLI_ADDRESS environment is not set, connecting to unix socket.")
|
||||
|
||||
address = cfg.UnixSocket
|
||||
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
grpc.WithInsecure(),
|
||||
grpc.WithContextDialer(headscale.GrpcSocketDialer),
|
||||
)
|
||||
} else {
|
||||
// If we are not connecting to a local server, require an API key for authentication
|
||||
apiKey := cfg.CLI.APIKey
|
||||
if apiKey == "" {
|
||||
log.Fatal().Msgf("HEADSCALE_CLI_API_KEY environment variable needs to be set.")
|
||||
}
|
||||
grpcOptions = append(grpcOptions,
|
||||
grpc.WithPerRPCCredentials(tokenAuth{
|
||||
token: apiKey,
|
||||
}),
|
||||
)
|
||||
|
||||
if cfg.CLI.Insecure {
|
||||
grpcOptions = append(grpcOptions, grpc.WithInsecure())
|
||||
}
|
||||
}
|
||||
-   err = yaml.Unmarshal(b, &derpMap)
-   return &derpMap, err
|
||||
|
||||
log.Trace().Caller().Str("address", address).Msg("Connecting via gRPC")
|
||||
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msgf("Could not connect: %v", err)
|
||||
}
|
||||
|
||||
client := v1.NewHeadscaleServiceClient(conn)
|
||||
|
||||
return ctx, client, conn, cancel
|
||||
}
|
||||
|
||||
-func JsonOutput(result interface{}, errResult error, outputFormat string) {
+func SuccessOutput(result interface{}, override string, outputFormat string) {
    var j []byte
    var err error
    switch outputFormat {
    case "json":
-       if errResult != nil {
-           j, err = json.MarshalIndent(ErrorOutput{errResult.Error()}, "", "\t")
-           if err != nil {
-               log.Fatal().Err(err)
-           }
-       } else {
-           j, err = json.MarshalIndent(result, "", "\t")
-           if err != nil {
-               log.Fatal().Err(err)
-           }
+       j, err = json.MarshalIndent(result, "", "\t")
+       if err != nil {
+           log.Fatal().Err(err)
        }
    case "json-line":
-       if errResult != nil {
-           j, err = json.Marshal(ErrorOutput{errResult.Error()})
-           if err != nil {
-               log.Fatal().Err(err)
-           }
-       } else {
-           j, err = json.Marshal(result)
-           if err != nil {
-               log.Fatal().Err(err)
-           }
+       j, err = json.Marshal(result)
+       if err != nil {
+           log.Fatal().Err(err)
        }
+   case "yaml":
+       j, err = yaml.Marshal(result)
+       if err != nil {
+           log.Fatal().Err(err)
+       }
    default:
        //nolint
        fmt.Println(override)

        return
    }

    //nolint
    fmt.Println(string(j))
}

func ErrorOutput(errResult error, override string, outputFormat string) {
|
||||
type errOutput struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
SuccessOutput(errOutput{errResult.Error()}, override, outputFormat)
|
||||
}
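A hedged usage sketch of the two output helpers above (the values are invented; an empty output format means human-readable output, anything else selects a machine-readable format):

// Hypothetical illustration only; assumes "errors" is imported.
result := map[string]string{"key": "abc123"}
SuccessOutput(result, "Key: abc123", "")               // prints the override string
SuccessOutput(result, "Key: abc123", "json")           // prints the indented JSON document
ErrorOutput(errors.New("boom"), "failed", "json-line") // prints {"error":"boom"} on one line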
|
||||
|
||||
func HasMachineOutputFlag() bool {
|
||||
for _, arg := range os.Args {
|
||||
if arg == "json" || arg == "json-line" || arg == "yaml" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type tokenAuth struct {
|
||||
token string
|
||||
}
|
||||
|
||||
// Return value is mapped to request headers.
|
||||
func (t tokenAuth) GetRequestMetadata(
|
||||
ctx context.Context,
|
||||
in ...string,
|
||||
) (map[string]string, error) {
|
||||
return map[string]string{
|
||||
"authorization": "Bearer " + t.token,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (tokenAuth) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// loadOIDCMatchMap is a wrapper around viper that verifies that the keys in
// the match map are valid regex strings.
|
||||
func loadOIDCMatchMap() map[string]string {
|
||||
strMap := viper.GetStringMapString("oidc.domain_map")
|
||||
|
||||
for oidcMatcher := range strMap {
|
||||
_ = regexp.MustCompile(oidcMatcher)
|
||||
}
|
||||
|
||||
return strMap
|
||||
}
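The domain-map validation above relies on regexp.MustCompile panicking on an invalid pattern; a minimal sketch of the same check with an invented mapping:

// Hypothetical illustration only: every key of oidc.domain_map must compile
// as a regular expression, otherwise headscale aborts at startup.
domainMap := map[string]string{
    `.*@example\.com`: "example-namespace",
}
for matcher := range domainMap {
    _ = regexp.MustCompile(matcher)
}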
|
||||
|
||||
func GetFileMode(key string) fs.FileMode {
|
||||
modeStr := viper.GetString(key)
|
||||
|
||||
mode, err := strconv.ParseUint(modeStr, headscale.Base8, headscale.BitSize64)
|
||||
if err != nil {
|
||||
return PermissionFallback
|
||||
}
|
||||
|
||||
return fs.FileMode(mode)
|
||||
}
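GetFileMode treats the configured string as an octal literal (headscale.Base8 and headscale.BitSize64 are presumably 8 and 64) and falls back to 0o700 when parsing fails; a minimal sketch of that behaviour:

// Hypothetical illustration only.
mode, err := strconv.ParseUint("0770", 8, 64)
if err != nil {
    mode = uint64(PermissionFallback) // 0o700
}
fmt.Println(fs.FileMode(mode)) // prints -rwxrwx---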
|
||||
|
||||
@@ -1,12 +1,10 @@
package cli

import (
-   "fmt"
    "github.com/spf13/cobra"
-   "strings"
)

-var version = "dev"
+var Version = "dev"

func init() {
    rootCmd.AddCommand(versionCmd)
@@ -17,11 +15,7 @@ var versionCmd = &cobra.Command{
    Short: "Print the version.",
    Long:  "The version of headscale.",
    Run: func(cmd *cobra.Command, args []string) {
-       o, _ := cmd.Flags().GetString("output")
-       if strings.HasPrefix(o, "json") {
-           JsonOutput(map[string]string{"version": version}, nil, o)
-           return
-       }
-       fmt.Println(version)
+       output, _ := cmd.Flags().GetString("output")
+       SuccessOutput(map[string]string{"version": Version}, Version, output)
    },
}

@@ -1,7 +1,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/efekarakus/termcolor"
|
||||
@@ -9,6 +11,7 @@ import (
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
"github.com/tcnksm/go-latest"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -20,16 +23,16 @@ func main() {
|
||||
colors = true
|
||||
case termcolor.LevelBasic:
|
||||
colors = true
|
||||
case termcolor.LevelNone:
|
||||
colors = false
|
||||
default:
|
||||
// no color, return text as is.
|
||||
log.Trace().Msg("Colors are not supported, disabling")
|
||||
colors = false
|
||||
}
|
||||
|
||||
// Adhere to no-color.org manifesto of allowing users to
|
||||
// turn off color in cli/services
|
||||
if _, noColorIsSet := os.LookupEnv("NO_COLOR"); noColorIsSet {
|
||||
log.Trace().Msg("NO_COLOR is set, disabling colors")
|
||||
colors = false
|
||||
}
|
||||
|
||||
@@ -40,25 +43,43 @@ func main() {
|
||||
NoColor: !colors,
|
||||
})
|
||||
|
||||
err := cli.LoadConfig("")
|
||||
if err != nil {
|
||||
if err := cli.LoadConfig(""); err != nil {
|
||||
log.Fatal().Err(err)
|
||||
}
|
||||
|
||||
machineOutput := cli.HasMachineOutputFlag()
|
||||
|
||||
logLevel := viper.GetString("log_level")
|
||||
switch logLevel {
|
||||
case "trace":
|
||||
zerolog.SetGlobalLevel(zerolog.TraceLevel)
|
||||
case "debug":
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
case "info":
|
||||
zerolog.SetGlobalLevel(zerolog.InfoLevel)
|
||||
case "warn":
|
||||
zerolog.SetGlobalLevel(zerolog.WarnLevel)
|
||||
case "error":
|
||||
zerolog.SetGlobalLevel(zerolog.ErrorLevel)
|
||||
default:
|
||||
level, err := zerolog.ParseLevel(logLevel)
|
||||
if err != nil {
|
||||
zerolog.SetGlobalLevel(zerolog.DebugLevel)
|
||||
} else {
|
||||
zerolog.SetGlobalLevel(level)
|
||||
}
|
||||
|
||||
// If the user has requested a "machine" readable format,
|
||||
// then disable login so the output remains valid.
|
||||
if machineOutput {
|
||||
zerolog.SetGlobalLevel(zerolog.Disabled)
|
||||
}
|
||||
|
||||
if !viper.GetBool("disable_check_updates") && !machineOutput {
|
||||
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
|
||||
cli.Version != "dev" {
|
||||
githubTag := &latest.GithubTag{
|
||||
Owner: "juanfont",
|
||||
Repository: "headscale",
|
||||
}
|
||||
res, err := latest.Check(githubTag, cli.Version)
|
||||
if err == nil && res.Outdated {
|
||||
//nolint
|
||||
fmt.Printf(
|
||||
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
|
||||
res.Current,
|
||||
cli.Version,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cli.Execute()
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -25,10 +25,9 @@ func (s *Suite) SetUpSuite(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *Suite) TearDownSuite(c *check.C) {
|
||||
|
||||
}
|
||||
|
||||
-func (*Suite) TestPostgresConfigLoading(c *check.C) {
+func (*Suite) TestConfigLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
@@ -41,7 +40,10 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(filepath.Clean(path+"/../../config.json.postgres.example"), filepath.Join(tmpDir, "config.json"))
|
||||
err = os.Symlink(
|
||||
filepath.Clean(path+"/../../config-example.yaml"),
|
||||
filepath.Join(tmpDir, "config.yaml"),
|
||||
)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
@@ -53,51 +55,52 @@ func (*Suite) TestPostgresConfigLoading(c *check.C) {
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "postgres")
|
||||
c.Assert(viper.GetString("db_port"), check.Equals, "5432")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
}
|
||||
|
||||
func (*Suite) TestSqliteConfigLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(filepath.Clean(path+"/../../config.json.sqlite.example"), filepath.Join(tmpDir, "config.json"))
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "0.0.0.0:8080")
|
||||
c.Assert(viper.GetString("derp_map_path"), check.Equals, "derp.yaml")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "db.sqlite")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
c.Assert(cli.GetFileMode("unix_socket_permission"), check.Equals, fs.FileMode(0o770))
|
||||
}
|
||||
|
||||
func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
tmpDir, err := ioutil.TempDir("", "headscale")
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Symlink the example config file
|
||||
err = os.Symlink(
|
||||
filepath.Clean(path+"/../../config-example.yaml"),
|
||||
filepath.Join(tmpDir, "config.yaml"),
|
||||
)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig, baseDomain := cli.GetDNSConfig()
|
||||
|
||||
c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
|
||||
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
|
||||
c.Assert(dnsConfig.Proxied, check.Equals, true)
|
||||
c.Assert(baseDomain, check.Equals, "example.com")
|
||||
}
|
||||
|
||||
func writeConfig(c *check.C, tmpDir string, configYaml []byte) {
|
||||
// Populate a custom config file
|
||||
configFile := filepath.Join(tmpDir, "config.yaml")
|
||||
err := ioutil.WriteFile(configFile, configYaml, 0644)
|
||||
err := ioutil.WriteFile(configFile, configYaml, 0o600)
|
||||
if err != nil {
|
||||
c.Fatalf("Couldn't write file %s", configFile)
|
||||
}
|
||||
@@ -108,10 +111,11 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
//defer os.RemoveAll(tmpDir)
|
||||
fmt.Println(tmpDir)
|
||||
// defer os.RemoveAll(tmpDir)
|
||||
|
||||
configYaml := []byte("---\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: \"abc.pem\"")
|
||||
configYaml := []byte(
|
||||
"---\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"\"\ntls_cert_path: \"abc.pem\"",
|
||||
)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
|
||||
// Check configuration validation errors (1)
|
||||
@@ -119,13 +123,26 @@ func (*Suite) TestTLSConfigValidation(c *check.C) {
|
||||
c.Assert(err, check.NotNil)
|
||||
// check.Matches can not handle multiline strings
|
||||
tmp := strings.ReplaceAll(err.Error(), "\n", "***")
|
||||
c.Assert(tmp, check.Matches, ".*Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both.*")
|
||||
c.Assert(tmp, check.Matches, ".*Fatal config error: the only supported values for tls_letsencrypt_challenge_type are.*")
|
||||
c.Assert(tmp, check.Matches, ".*Fatal config error: server_url must start with https:// or http://.*")
|
||||
fmt.Println(tmp)
|
||||
c.Assert(
|
||||
tmp,
|
||||
check.Matches,
|
||||
".*Fatal config error: set either tls_letsencrypt_hostname or tls_cert_path/tls_key_path, not both.*",
|
||||
)
|
||||
c.Assert(
|
||||
tmp,
|
||||
check.Matches,
|
||||
".*Fatal config error: the only supported values for tls_letsencrypt_challenge_type are.*",
|
||||
)
|
||||
c.Assert(
|
||||
tmp,
|
||||
check.Matches,
|
||||
".*Fatal config error: server_url must start with https:// or http://.*",
|
||||
)
|
||||
|
||||
// Check configuration validation errors (2)
|
||||
configYaml = []byte("---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"")
|
||||
configYaml = []byte(
|
||||
"---\nserver_url: \"http://127.0.0.1:8080\"\ntls_letsencrypt_hostname: \"example.com\"\ntls_letsencrypt_challenge_type: \"TLS-ALPN-01\"",
|
||||
)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
err = cli.LoadConfig(tmpDir)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
173  config-example.yaml  (new file)
@@ -0,0 +1,173 @@
|
||||
---
|
||||
# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order:
|
||||
#
|
||||
# - `/etc/headscale`
|
||||
# - `~/.headscale`
|
||||
# - current working directory
|
||||
|
||||
# The url clients will connect to.
|
||||
# Typically this will be a domain like:
|
||||
#
|
||||
# https://myheadscale.example.com:443
|
||||
#
|
||||
server_url: http://127.0.0.1:8080
|
||||
|
||||
# Address to listen to / bind to on the server
|
||||
#
|
||||
listen_addr: 0.0.0.0:8080
|
||||
|
||||
# Private key used to encrypt the traffic between headscale
|
||||
# and Tailscale clients.
|
||||
# The private key file which will be
|
||||
# autogenerated if it's missing
|
||||
private_key_path: /var/lib/headscale/private.key
|
||||
|
||||
# List of IP prefixes to allocate tailaddresses from.
|
||||
# Each prefix consists of either an IPv4 or IPv6 address,
|
||||
# and the associated prefix length, delimited by a slash.
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
|
||||
# DERP is a relay system that Tailscale uses when a direct
|
||||
# connection cannot be established.
|
||||
# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
|
||||
#
|
||||
# headscale needs a list of DERP servers that can be presented
|
||||
# to the clients.
|
||||
derp:
|
||||
# List of externally available DERP maps encoded in JSON
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
|
||||
# Locally available DERP map files encoded in YAML
|
||||
#
|
||||
# This option is mostly interesting for people hosting
|
||||
# their own DERP servers:
|
||||
# https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
#
|
||||
# paths:
|
||||
# - /etc/headscale/derp-example.yaml
|
||||
paths: []
|
||||
|
||||
# If enabled, a worker will be set up to periodically
# refresh the given sources and update the derpmap.
|
||||
auto_update_enabled: true
|
||||
|
||||
# How often should we check for DERP updates?
|
||||
update_frequency: 24h
|
||||
|
||||
# Disables the automatic check for headscale updates on startup
|
||||
disable_check_updates: false
|
||||
|
||||
# Time before an inactive ephemeral node is deleted.
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
|
||||
# SQLite config
|
||||
db_type: sqlite3
|
||||
db_path: /var/lib/headscale/db.sqlite
|
||||
|
||||
# # Postgres config
|
||||
# db_type: postgres
|
||||
# db_host: localhost
|
||||
# db_port: 5432
|
||||
# db_name: headscale
|
||||
# db_user: foo
|
||||
# db_pass: bar
|
||||
|
||||
### TLS configuration
|
||||
#
|
||||
## Let's encrypt / ACME
|
||||
#
|
||||
# headscale supports automatically requesting and setting up
|
||||
# TLS for a domain with Let's Encrypt.
|
||||
#
|
||||
# URL to ACME directory
|
||||
acme_url: https://acme-v02.api.letsencrypt.org/directory
|
||||
|
||||
# Email to register with ACME provider
|
||||
acme_email: ""
|
||||
|
||||
# Domain name to request a TLS certificate for:
|
||||
tls_letsencrypt_hostname: ""
|
||||
|
||||
# Path to store certificates and metadata needed by
|
||||
# letsencrypt
|
||||
tls_letsencrypt_cache_dir: /var/lib/headscale/cache
|
||||
|
||||
# Type of ACME challenge to use, currently supported types:
|
||||
# HTTP-01 or TLS_ALPN-01
|
||||
# See [docs/tls.md](docs/tls.md) for more information
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
# When HTTP-01 challenge is chosen, letsencrypt must set up a
|
||||
# verification endpoint, and it will be listening on:
|
||||
# :http = port 80
|
||||
tls_letsencrypt_listen: ":http"
|
||||
|
||||
## Use already defined certificates:
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
|
||||
log_level: info
|
||||
|
||||
# Path to a file containg ACL policies.
|
||||
# Recommended path: /etc/headscale/acl.hujson
|
||||
acl_policy_path: ""
|
||||
|
||||
## DNS
|
||||
#
|
||||
# headscale supports Tailscale's DNS configuration and MagicDNS.
|
||||
# Please have a look to their KB to better understand the concepts:
|
||||
#
|
||||
# - https://tailscale.com/kb/1054/dns/
|
||||
# - https://tailscale.com/kb/1081/magicdns/
|
||||
# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
|
||||
#
|
||||
dns_config:
|
||||
# List of DNS servers to expose to clients.
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
|
||||
# Split DNS (see https://tailscale.com/kb/1054/dns/),
|
||||
# list of search domains and the DNS to query for each one.
|
||||
#
|
||||
# restricted_nameservers:
|
||||
# foo.bar.com:
|
||||
# - 1.1.1.1
|
||||
# darp.headscale.net:
|
||||
# - 1.1.1.1
|
||||
# - 8.8.8.8
|
||||
|
||||
# Search domains to inject.
|
||||
domains: []
|
||||
|
||||
# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
|
||||
# Only works if there is at least a nameserver defined.
|
||||
magic_dns: true
|
||||
|
||||
# Defines the base domain to create the hostnames for MagicDNS.
|
||||
# `base_domain` must be an FQDN, without the trailing dot.
|
||||
# The FQDN of the hosts will be
|
||||
# `hostname.namespace.base_domain` (e.g., _myhost.mynamespace.example.com_).
|
||||
base_domain: example.com
|
||||
|
||||
# Unix socket used for the CLI to connect without authentication
|
||||
# Note: for local development, you probably want to change this to:
|
||||
# unix_socket: ./headscale.sock
|
||||
unix_socket: /var/run/headscale.sock
|
||||
unix_socket_permission: "0770"
|
||||
#
|
||||
# headscale supports experimental OpenID connect support,
|
||||
# it is still being tested and might have some bugs, please
|
||||
# help us test it.
|
||||
# OpenID Connect
|
||||
# oidc:
|
||||
# issuer: "https://your-oidc.issuer.com/path"
|
||||
# client_id: "your-oidc-client-id"
|
||||
# client_secret: "your-oidc-client-secret"
|
||||
#
|
||||
# # Domain map is used to map incoming users (by their email) to
|
||||
# # a namespace. The key can be a string, or regex.
|
||||
# domain_map:
|
||||
# ".*": default-namespace
|
||||
@@ -1,20 +0,0 @@
|
||||
{
|
||||
"server_url": "http://127.0.0.1:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"private_key_path": "private.key",
|
||||
"derp_map_path": "derp.yaml",
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
"db_type": "postgres",
|
||||
"db_host": "localhost",
|
||||
"db_port": 5432,
|
||||
"db_name": "headscale",
|
||||
"db_user": "foo",
|
||||
"db_pass": "bar",
|
||||
"tls_letsencrypt_hostname": "",
|
||||
"tls_letsencrypt_listen": ":http",
|
||||
"tls_letsencrypt_cache_dir": ".cache",
|
||||
"tls_letsencrypt_challenge_type": "HTTP-01",
|
||||
"tls_cert_path": "",
|
||||
"tls_key_path": "",
|
||||
"acl_policy_path": ""
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"server_url": "http://127.0.0.1:8080",
|
||||
"listen_addr": "0.0.0.0:8080",
|
||||
"private_key_path": "private.key",
|
||||
"derp_map_path": "derp.yaml",
|
||||
"ephemeral_node_inactivity_timeout": "30m",
|
||||
"db_type": "sqlite3",
|
||||
"db_path": "db.sqlite",
|
||||
"tls_letsencrypt_hostname": "",
|
||||
"tls_letsencrypt_listen": ":http",
|
||||
"tls_letsencrypt_cache_dir": ".cache",
|
||||
"tls_letsencrypt_challenge_type": "HTTP-01",
|
||||
"tls_cert_path": "",
|
||||
"tls_key_path": "",
|
||||
"acl_policy_path": ""
|
||||
}
|
||||
40  db.go
@@ -9,7 +9,10 @@ import (
|
||||
"gorm.io/gorm/logger"
|
||||
)
|
||||
|
||||
const dbVersion = "1"
|
||||
const (
|
||||
dbVersion = "1"
|
||||
errValueNotFound = Error("not found")
|
||||
)
|
||||
|
||||
// KV is a key-value store in a psql table. For future use...
|
||||
type KV struct {
|
||||
@@ -24,7 +27,7 @@ func (h *Headscale) initDB() error {
|
||||
}
|
||||
h.db = db
|
||||
|
||||
if h.dbType == "postgres" {
|
||||
if h.dbType == Postgres {
|
||||
db.Exec("create extension if not exists \"uuid-ossp\";")
|
||||
}
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
@@ -44,7 +47,13 @@ func (h *Headscale) initDB() error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&SharedMachine{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = h.setValue("db_version", dbVersion)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -60,12 +69,12 @@ func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
}
|
||||
|
||||
switch h.dbType {
|
||||
case "sqlite3":
|
||||
case Sqlite:
|
||||
db, err = gorm.Open(sqlite.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
})
|
||||
case "postgres":
|
||||
case Postgres:
|
||||
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
@@ -79,28 +88,33 @@ func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// getValue returns the value for the given key in KV
|
||||
// getValue returns the value for the given key in KV.
|
||||
func (h *Headscale) getValue(key string) (string, error) {
|
||||
var row KV
|
||||
if result := h.db.First(&row, "key = ?", key); errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return "", errors.New("not found")
|
||||
if result := h.db.First(&row, "key = ?", key); errors.Is(
|
||||
result.Error,
|
||||
gorm.ErrRecordNotFound,
|
||||
) {
|
||||
return "", errValueNotFound
|
||||
}
|
||||
|
||||
return row.Value, nil
|
||||
}
|
||||
|
||||
// setValue sets value for the given key in KV
|
||||
// setValue sets value for the given key in KV.
|
||||
func (h *Headscale) setValue(key string, value string) error {
|
||||
kv := KV{
|
||||
keyValue := KV{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
|
||||
_, err := h.getValue(key)
|
||||
if err == nil {
|
||||
h.db.Model(&kv).Where("key = ?", key).Update("value", value)
|
||||
if _, err := h.getValue(key); err == nil {
|
||||
h.db.Model(&keyValue).Where("key = ?", key).Update("value", value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
h.db.Create(kv)
|
||||
h.db.Create(keyValue)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
15  derp-example.yaml  (new file)
@@ -0,0 +1,15 @@
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra: https://tailscale.com/kb/1118/custom-derp-servers/
|
||||
regions:
|
||||
900:
|
||||
regionid: 900
|
||||
regioncode: custom
|
||||
regionname: My Region
|
||||
nodes:
|
||||
- name: 900a
|
||||
regionid: 900
|
||||
hostname: myderp.mydomain.no
|
||||
ipv4: 123.123.123.123
|
||||
ipv6: "2604:a880:400:d1::828:b001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
164  derp.go  (new file)
@@ -0,0 +1,164 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"gopkg.in/yaml.v2"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func loadDERPMapFromPath(path string) (*tailcfg.DERPMap, error) {
|
||||
derpFile, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer derpFile.Close()
|
||||
var derpMap tailcfg.DERPMap
|
||||
b, err := io.ReadAll(derpFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = yaml.Unmarshal(b, &derpMap)
|
||||
|
||||
return &derpMap, err
|
||||
}
|
||||
|
||||
func loadDERPMapFromURL(addr url.URL) (*tailcfg.DERPMap, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), HTTPReadTimeout)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", addr.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Timeout: HTTPReadTimeout,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var derpMap tailcfg.DERPMap
|
||||
err = json.Unmarshal(body, &derpMap)
|
||||
|
||||
return &derpMap, err
|
||||
}
|
||||
|
||||
// mergeDERPMaps naively merges a list of DERPMaps into a single
|
||||
// DERPMap, it will _only_ look at the Regions, an integer.
|
||||
// If a region exists in two of the given DERPMaps, the region
|
||||
// from the _last_ DERPMap will be preserved.
|
||||
// An empty DERPMap list will result in a DERPMap with no regions.
|
||||
func mergeDERPMaps(derpMaps []*tailcfg.DERPMap) *tailcfg.DERPMap {
|
||||
result := tailcfg.DERPMap{
|
||||
OmitDefaultRegions: false,
|
||||
Regions: map[int]*tailcfg.DERPRegion{},
|
||||
}
|
||||
|
||||
for _, derpMap := range derpMaps {
|
||||
for id, region := range derpMap.Regions {
|
||||
result.Regions[id] = region
|
||||
}
|
||||
}
|
||||
|
||||
return &result
|
||||
}
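Since mergeDERPMaps keys only on the region ID, the last map in the slice wins on collisions; a small sketch with invented regions:

// Hypothetical illustration only: region 900 from the second map overrides
// region 900 from the first, while region 901 is simply added.
first := &tailcfg.DERPMap{Regions: map[int]*tailcfg.DERPRegion{
    900: {RegionID: 900, RegionCode: "custom-a"},
}}
second := &tailcfg.DERPMap{Regions: map[int]*tailcfg.DERPRegion{
    900: {RegionID: 900, RegionCode: "custom-b"},
    901: {RegionID: 901, RegionCode: "custom-extra"},
}}
merged := mergeDERPMaps([]*tailcfg.DERPMap{first, second})
// merged.Regions[900].RegionCode == "custom-b"; len(merged.Regions) == 2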
|
||||
|
||||
func GetDERPMap(cfg DERPConfig) *tailcfg.DERPMap {
|
||||
derpMaps := make([]*tailcfg.DERPMap, 0)
|
||||
|
||||
for _, path := range cfg.Paths {
|
||||
log.Debug().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("path", path).
|
||||
Msg("Loading DERPMap from path")
|
||||
derpMap, err := loadDERPMapFromPath(path)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("path", path).
|
||||
Err(err).
|
||||
Msg("Could not load DERP map from path")
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
derpMaps = append(derpMaps, derpMap)
|
||||
}
|
||||
|
||||
for _, addr := range cfg.URLs {
|
||||
derpMap, err := loadDERPMapFromURL(addr)
|
||||
log.Debug().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("url", addr.String()).
|
||||
Msg("Loading DERPMap from path")
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Str("func", "GetDERPMap").
|
||||
Str("url", addr.String()).
|
||||
Err(err).
|
||||
Msg("Could not load DERP map from path")
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
derpMaps = append(derpMaps, derpMap)
|
||||
}
|
||||
|
||||
derpMap := mergeDERPMaps(derpMaps)
|
||||
|
||||
log.Trace().Interface("derpMap", derpMap).Msg("DERPMap loaded")
|
||||
|
||||
if len(derpMap.Regions) == 0 {
|
||||
log.Warn().
|
||||
Msg("DERP map is empty, not a single DERP map datasource was loaded correctly or contained a region")
|
||||
}
|
||||
|
||||
return derpMap
|
||||
}
|
||||
|
||||
func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
|
||||
log.Info().
|
||||
Dur("frequency", h.cfg.DERP.UpdateFrequency).
|
||||
Msg("Setting up a DERPMap update worker")
|
||||
ticker := time.NewTicker(h.cfg.DERP.UpdateFrequency)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-cancelChan:
|
||||
return
|
||||
|
||||
case <-ticker.C:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Msg("Failed to fetch namespaces")
|
||||
}
|
||||
|
||||
for _, namespace := range namespaces {
|
||||
h.setLastStateChangeToNow(namespace.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
146  derp.yaml
@@ -1,146 +0,0 @@
|
||||
# This file contains some of the official Tailscale DERP servers,
|
||||
# shamelessly taken from https://github.com/tailscale/tailscale/blob/main/net/dnsfallback/dns-fallback-servers.json
|
||||
#
|
||||
# If you plan to somehow use headscale, please deploy your own DERP infra
|
||||
regions:
|
||||
1:
|
||||
regionid: 1
|
||||
regioncode: nyc
|
||||
regionname: New York City
|
||||
nodes:
|
||||
- name: 1a
|
||||
regionid: 1
|
||||
hostname: derp1.tailscale.com
|
||||
ipv4: 159.89.225.99
|
||||
ipv6: "2604:a880:400:d1::828:b001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 1b
|
||||
regionid: 1
|
||||
hostname: derp1b.tailscale.com
|
||||
ipv4: 45.55.35.93
|
||||
ipv6: "2604:a880:800:a1::f:2001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
2:
|
||||
regionid: 2
|
||||
regioncode: sfo
|
||||
regionname: San Francisco
|
||||
nodes:
|
||||
- name: 2a
|
||||
regionid: 2
|
||||
hostname: derp2.tailscale.com
|
||||
ipv4: 167.172.206.31
|
||||
ipv6: "2604:a880:2:d1::c5:7001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 2b
|
||||
regionid: 2
|
||||
hostname: derp2b.tailscale.com
|
||||
ipv4: 64.227.106.23
|
||||
ipv6: "2604:a880:4:1d0::29:9000"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
3:
|
||||
regionid: 3
|
||||
regioncode: sin
|
||||
regionname: Singapore
|
||||
nodes:
|
||||
- name: 3a
|
||||
regionid: 3
|
||||
hostname: derp3.tailscale.com
|
||||
ipv4: 68.183.179.66
|
||||
ipv6: "2400:6180:0:d1::67d:8001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
4:
|
||||
regionid: 4
|
||||
regioncode: fra
|
||||
regionname: Frankfurt
|
||||
nodes:
|
||||
- name: 4a
|
||||
regionid: 4
|
||||
hostname: derp4.tailscale.com
|
||||
ipv4: 167.172.182.26
|
||||
ipv6: "2a03:b0c0:3:e0::36e:900"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
- name: 4b
|
||||
regionid: 4
|
||||
hostname: derp4b.tailscale.com
|
||||
ipv4: 157.230.25.0
|
||||
ipv6: "2a03:b0c0:3:e0::58f:3001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
5:
|
||||
regionid: 5
|
||||
regioncode: syd
|
||||
regionname: Sydney
|
||||
nodes:
|
||||
- name: 5a
|
||||
regionid: 5
|
||||
hostname: derp5.tailscale.com
|
||||
ipv4: 103.43.75.49
|
||||
ipv6: "2001:19f0:5801:10b7:5400:2ff:feaa:284c"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
6:
|
||||
regionid: 6
|
||||
regioncode: blr
|
||||
regionname: Bangalore
|
||||
nodes:
|
||||
- name: 6a
|
||||
regionid: 6
|
||||
hostname: derp6.tailscale.com
|
||||
ipv4: 68.183.90.120
|
||||
ipv6: "2400:6180:100:d0::982:d001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
7:
|
||||
regionid: 7
|
||||
regioncode: tok
|
||||
regionname: Tokyo
|
||||
nodes:
|
||||
- name: 7a
|
||||
regionid: 7
|
||||
hostname: derp7.tailscale.com
|
||||
ipv4: 167.179.89.145
|
||||
ipv6: "2401:c080:1000:467f:5400:2ff:feee:22aa"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
8:
|
||||
regionid: 8
|
||||
regioncode: lhr
|
||||
regionname: London
|
||||
nodes:
|
||||
- name: 8a
|
||||
regionid: 8
|
||||
hostname: derp8.tailscale.com
|
||||
ipv4: 167.71.139.179
|
||||
ipv6: "2a03:b0c0:1:e0::3cc:e001"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
9:
|
||||
regionid: 9
|
||||
regioncode: sao
|
||||
regionname: São Paulo
|
||||
nodes:
|
||||
- name: 9a
|
||||
regionid: 9
|
||||
hostname: derp9.tailscale.com
|
||||
ipv4: 207.148.3.137
|
||||
ipv6: "2001:19f0:6401:1d9c:5400:2ff:feef:bb82"
|
||||
stunport: 0
|
||||
stunonly: false
|
||||
derptestport: 0
|
||||
176
dns.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/fatih/set"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/dnsname"
|
||||
)
|
||||
|
||||
const (
|
||||
ByteSize = 8
|
||||
)
|
||||
|
||||
const (
|
||||
ipv4AddressLength = 32
|
||||
ipv6AddressLength = 128
|
||||
)
|
||||
|
||||
// generateMagicDNSRootDomains generates a list of DNS entries to be included in `Routes` in `MapResponse`.
|
||||
// This list of reverse DNS entries instructs the OS on what subnets and domains the Tailscale embedded DNS
|
||||
// server (listening on 100.100.100.100 udp/53) should be used for.
|
||||
//
|
||||
// Tailscale.com includes in the list:
|
||||
// - the `BaseDomain` of the user
|
||||
// - the reverse DNS entry for IPv6 (0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa., see below more on IPv6)
|
||||
// - the reverse DNS entries for the IPv4 subnets covered by the user's `IPPrefix`.
|
||||
// In the public SaaS this is [64-127].100.in-addr.arpa.
|
||||
//
|
||||
// The main purpose of this function is then generating the list of IPv4 entries. For the 100.64.0.0/10, this
|
||||
// is clear, and could be hardcoded. But we are allowing any range as `IPPrefix`, so we need to find out the
|
||||
// subnets when we have 172.16.0.0/16 (i.e., [0-255].16.172.in-addr.arpa.), or any other subnet.
|
||||
//
|
||||
// How IN-ADDR.ARPA domains work is defined in RFC1035 (section 3.5). Tailscale.com seems to adhere to this,
|
||||
// and does not make use of RFC2317 ("Classless IN-ADDR.ARPA delegation") - hence generating the entries for the next
|
||||
// class block only.
|
||||
|
||||
// From the netmask we can find out the wildcard bits (the bits that are not set in the netmask).
|
||||
// This allows us to then calculate the subnets included in the subsequent class block and generate the entries.
|
||||
func generateMagicDNSRootDomains(ipPrefixes []netaddr.IPPrefix) []dnsname.FQDN {
|
||||
fqdns := make([]dnsname.FQDN, 0, len(ipPrefixes))
|
||||
for _, ipPrefix := range ipPrefixes {
|
||||
var generateDNSRoot func(netaddr.IPPrefix) []dnsname.FQDN
|
||||
switch ipPrefix.IP().BitLen() {
|
||||
case ipv4AddressLength:
|
||||
generateDNSRoot = generateIPv4DNSRootDomain
|
||||
|
||||
case ipv6AddressLength:
|
||||
generateDNSRoot = generateIPv6DNSRootDomain
|
||||
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported IP version with address length %d", ipPrefix.IP().BitLen()))
|
||||
}
|
||||
|
||||
fqdns = append(fqdns, generateDNSRoot(ipPrefix)...)
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv4DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
// Conversion to the std lib net.IPNet, which is a bit easier to operate on
|
||||
netRange := ipPrefix.IPNet()
|
||||
maskBits, _ := netRange.Mask.Size()
|
||||
|
||||
// lastOctet is the last IP byte covered by the mask
|
||||
lastOctet := maskBits / ByteSize
|
||||
|
||||
// wildcardBits is the number of bits not under the mask in the lastOctet
|
||||
wildcardBits := ByteSize - maskBits%ByteSize
|
||||
|
||||
// min is the value in the lastOctet byte of the IP
|
||||
// max is basically 2^wildcardBits - i.e., the value when all the wildcardBits are set to 1
|
||||
min := uint(netRange.IP[lastOctet])
|
||||
max := (min + 1<<uint(wildcardBits)) - 1
|
||||
|
||||
// here we generate the base domain (e.g., 100.in-addr.arpa., 16.172.in-addr.arpa., etc.)
|
||||
rdnsSlice := []string{}
|
||||
for i := lastOctet - 1; i >= 0; i-- {
|
||||
rdnsSlice = append(rdnsSlice, fmt.Sprintf("%d", netRange.IP[i]))
|
||||
}
|
||||
rdnsSlice = append(rdnsSlice, "in-addr.arpa.")
|
||||
rdnsBase := strings.Join(rdnsSlice, ".")
|
||||
|
||||
fqdns := make([]dnsname.FQDN, 0, max-min+1)
|
||||
for i := min; i <= max; i++ {
|
||||
fqdn, err := dnsname.ToFQDN(fmt.Sprintf("%d.%s", i, rdnsBase))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fqdns = append(fqdns, fqdn)
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func generateIPv6DNSRootDomain(ipPrefix netaddr.IPPrefix) []dnsname.FQDN {
|
||||
const nibbleLen = 4
|
||||
|
||||
maskBits, _ := ipPrefix.IPNet().Mask.Size()
|
||||
expanded := ipPrefix.IP().StringExpanded()
|
||||
nibbleStr := strings.Map(func(r rune) rune {
|
||||
if r == ':' {
|
||||
return -1
|
||||
}
|
||||
|
||||
return r
|
||||
}, expanded)
|
||||
|
||||
// TODO?: that does not look the most efficient implementation,
|
||||
// but the inputs are not so long as to cause problems,
|
||||
// and from what I can see, the generateMagicDNSRootDomains
|
||||
// function is called only once over the lifetime of a server process.
|
||||
prefixConstantParts := []string{}
|
||||
for i := 0; i < maskBits/nibbleLen; i++ {
|
||||
prefixConstantParts = append([]string{string(nibbleStr[i])}, prefixConstantParts...)
|
||||
}
|
||||
|
||||
makeDomain := func(variablePrefix ...string) (dnsname.FQDN, error) {
|
||||
prefix := strings.Join(append(variablePrefix, prefixConstantParts...), ".")
|
||||
|
||||
return dnsname.ToFQDN(fmt.Sprintf("%s.ip6.arpa", prefix))
|
||||
}
|
||||
|
||||
var fqdns []dnsname.FQDN
|
||||
if maskBits%4 == 0 {
|
||||
dom, _ := makeDomain()
|
||||
fqdns = append(fqdns, dom)
|
||||
} else {
|
||||
domCount := 1 << (maskBits % nibbleLen)
|
||||
fqdns = make([]dnsname.FQDN, 0, domCount)
|
||||
for i := 0; i < domCount; i++ {
|
||||
varNibble := fmt.Sprintf("%x", i)
|
||||
dom, err := makeDomain(varNibble)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
fqdns = append(fqdns, dom)
|
||||
}
|
||||
}
|
||||
|
||||
return fqdns
|
||||
}
|
||||
|
||||
func getMapResponseDNSConfig(
|
||||
dnsConfigOrig *tailcfg.DNSConfig,
|
||||
baseDomain string,
|
||||
machine Machine,
|
||||
peers Machines,
|
||||
) *tailcfg.DNSConfig {
|
||||
var dnsConfig *tailcfg.DNSConfig
|
||||
if dnsConfigOrig != nil && dnsConfigOrig.Proxied { // if MagicDNS is enabled
|
||||
// Only inject the Search Domain of the current namespace - shared nodes should use their full FQDN
|
||||
dnsConfig = dnsConfigOrig.Clone()
|
||||
dnsConfig.Domains = append(
|
||||
dnsConfig.Domains,
|
||||
fmt.Sprintf("%s.%s", machine.Namespace.Name, baseDomain),
|
||||
)
|
||||
|
||||
namespaceSet := set.New(set.ThreadSafe)
|
||||
namespaceSet.Add(machine.Namespace)
|
||||
for _, p := range peers {
|
||||
namespaceSet.Add(p.Namespace)
|
||||
}
|
||||
for _, namespace := range namespaceSet.List() {
|
||||
dnsRoute := fmt.Sprintf("%s.%s", namespace.(Namespace).Name, baseDomain)
|
||||
dnsConfig.Routes[dnsRoute] = nil
|
||||
}
|
||||
} else {
|
||||
dnsConfig = dnsConfigOrig
|
||||
}
|
||||
|
||||
return dnsConfig
|
||||
}
|
||||
395
dns_test.go
Normal file
@@ -0,0 +1,395 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"inet.af/netaddr"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("100.64.0.0/10"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
if domain == "64.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "100.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "127.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("172.16.0.0/16"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
if domain == "0.16.172.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "255.16.172.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
// Happens when netmask is a multiple of 4 bits (sounds likely).
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
c.Assert(len(domains), check.Equals, 1)
|
||||
c.Assert(domains[0].WithTrailingDot(), check.Equals, "0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.")
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6SingleMultiple(c *check.C) {
|
||||
prefixes := []netaddr.IPPrefix{
|
||||
netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/50"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
yieldsRoot := func(dom string) bool {
|
||||
for _, candidate := range domains {
|
||||
if candidate.WithTrailingDot() == dom {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
c.Assert(len(domains), check.Equals, 4)
|
||||
c.Assert(yieldsRoot("0.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("1.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("2.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("3.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
namespaceShared1, err := app.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared2, err := app.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared3, err := app.CreateNamespace("shared3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared2, err := app.CreatePreAuthKey(
|
||||
namespaceShared2.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared3, err := app.CreatePreAuthKey(
|
||||
namespaceShared3.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
PreAuthKey2InShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
err = app.AddSharedMachineToNamespace(machineInShared2, namespaceShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: true,
|
||||
}
|
||||
|
||||
peersOfMachineInShared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachineInShared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 2)
|
||||
|
||||
domainRouteShared1 := fmt.Sprintf("%s.%s", namespaceShared1.Name, baseDomain)
|
||||
_, ok := dnsConfig.Routes[domainRouteShared1]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
|
||||
domainRouteShared2 := fmt.Sprintf("%s.%s", namespaceShared2.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared2]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
|
||||
domainRouteShared3 := fmt.Sprintf("%s.%s", namespaceShared3.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared3]
|
||||
c.Assert(ok, check.Equals, false)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
namespaceShared1, err := app.CreateNamespace("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared2, err := app.CreateNamespace("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
namespaceShared3, err := app.CreateNamespace("shared3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared2, err := app.CreatePreAuthKey(
|
||||
namespaceShared2.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared3, err := app.CreatePreAuthKey(
|
||||
namespaceShared3.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKey2InShared1, err := app.CreatePreAuthKey(
|
||||
namespaceShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Name: "test_get_shared_nodes_1",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared1.Name, machineInShared1.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_2",
|
||||
NamespaceID: namespaceShared2.ID,
|
||||
Namespace: *namespaceShared2,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared2.Name, machineInShared2.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_3",
|
||||
NamespaceID: namespaceShared3.ID,
|
||||
Namespace: *namespaceShared3,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(namespaceShared3.Name, machineInShared3.Name)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Name: "test_get_shared_nodes_4",
|
||||
NamespaceID: namespaceShared1.ID,
|
||||
Namespace: *namespaceShared1,
|
||||
Registered: true,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netaddr.IP{netaddr.MustParseIP("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
err = app.AddSharedMachineToNamespace(machineInShared2, namespaceShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: false,
|
||||
}
|
||||
|
||||
peersOfMachine1Shared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachine1Shared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 0)
|
||||
c.Assert(len(dnsConfig.Domains), check.Equals, 1)
|
||||
}
|
||||
42
docs/README.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# headscale documentation
|
||||
|
||||
This page contains the official and community contributed documentation for `headscale`.
|
||||
|
||||
If you are having trouble following the documentation or get unexpected results,
|
||||
please ask on [Discord](https://discord.gg/XcQxk2VHjx) instead of opening an Issue.
|
||||
|
||||
## Official documentation
|
||||
|
||||
### How-to
|
||||
|
||||
- [Running headscale on Linux](running-headscale-linux.md)
|
||||
|
||||
### References
|
||||
|
||||
- [Configuration](../config-example.yaml)
|
||||
- [Glossary](glossary.md)
|
||||
- [TLS](tls.md)
|
||||
|
||||
## Community documentation
|
||||
|
||||
Community documentation is not actively maintained by the headscale authors and is
|
||||
written by community members. It is _not_ verified by `headscale` developers.
|
||||
|
||||
**It might be outdated and it might miss necessary steps**.
|
||||
|
||||
- [Running headscale in a container](running-headscale-container.md)
|
||||
|
||||
## Misc
|
||||
|
||||
### Policy ACLs
|
||||
|
||||
Headscale implements the same policy ACLs as Tailscale.com, adapted to the self-hosted environment.
|
||||
|
||||
For instance, instead of referring to users when defining groups you must
|
||||
use namespaces (which are the equivalent of users/logins in Tailscale.com).
|
||||
|
||||
Please check https://tailscale.com/kb/1018/acls/, and `./tests/acls/` in this repo for working examples.
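As a rough, hedged illustration only (the exact field names follow the Tailscale ACL syntax of this era and are best taken from the working examples in `./tests/acls/`; `myfirstnamespace` is a placeholder), a policy that lets one group of namespaces reach everything could look like this:

```
{
  // Groups are defined in terms of namespaces, not individual users.
  "groups": {
    "group:admins": ["myfirstnamespace"]
  },
  // Allow members of group:admins to reach every destination on every port.
  "acls": [
    {
      "action": "accept",
      "users": ["group:admins"],
      "ports": ["*:*"]
    }
  ]
}
```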
|
||||
|
||||
### Apple devices
|
||||
|
||||
An endpoint with information on how to connect your Apple devices (currently macOS only) is available at `/apple` on your running instance.
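For example, assuming your instance is reachable at `YOUR_HEADSCALE_URL`, the page can be opened in a browser or fetched with curl:

```shell
curl https://YOUR_HEADSCALE_URL/apple
```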
|
||||
5
docs/examples/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Examples
|
||||
|
||||
This directory contains examples on how to run `headscale` on different platforms.
|
||||
|
||||
All examples are provided by the community and they are not verified by the `headscale` authors.
|
||||
@@ -1,7 +1,9 @@
|
||||
# Deploying Headscale on Kubernetes
|
||||
# Deploying headscale on Kubernetes
|
||||
|
||||
**Note:** This is contributed by the community and not verified by the headscale authors.
|
||||
|
||||
This directory contains [Kustomize](https://kustomize.io) templates that deploy
|
||||
Headscale in various configurations.
|
||||
headscale in various configurations.
|
||||
|
||||
These templates currently support Rancher k3s. Other clusters may require
|
||||
adaptation, especially around volume claims and ingress.
|
||||
@@ -24,6 +26,7 @@ Configure DERP servers by editing `base/site/derp.yaml` if needed.
|
||||
You'll somehow need to get `headscale:latest` into your cluster image registry.
|
||||
|
||||
An easy way to do this with k3s:
|
||||
|
||||
- Reconfigure k3s to use docker instead of containerd (`k3s server --docker`)
|
||||
- `docker build -t headscale:latest ..` from here
|
||||
|
||||
@@ -61,21 +64,21 @@ Use the wrapper script to remotely operate headscale to perform administrative
|
||||
tasks like creating namespaces, authkeys, etc.
|
||||
|
||||
```
|
||||
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash
|
||||
[c@nix-slate:~/Projects/headscale/k8s]$ ./headscale.bash
|
||||
|
||||
headscale is an open source implementation of the Tailscale control server
|
||||
|
||||
https://gitlab.com/juanfont/headscale
|
||||
https://github.com/juanfont/headscale
|
||||
|
||||
Usage:
|
||||
headscale [command]
|
||||
|
||||
Available Commands:
|
||||
help Help about any command
|
||||
namespace Manage the namespaces of Headscale
|
||||
node Manage the nodes of Headscale
|
||||
preauthkey Handle the preauthkeys in Headscale
|
||||
routes Manage the routes of Headscale
|
||||
namespace Manage the namespaces of headscale
|
||||
node Manage the nodes of headscale
|
||||
preauthkey Handle the preauthkeys in headscale
|
||||
routes Manage the routes of headscale
|
||||
serve Launches the headscale server
|
||||
version Print the version.
|
||||
|
||||
@@ -89,7 +92,6 @@ Use "headscale [command] --help" for more information about a command.
|
||||
|
||||
# TODO / Ideas
|
||||
|
||||
- Github action to publish the docker image
|
||||
- Interpolate `email:` option to the ClusterIssuer from site configuration.
|
||||
This probably needs to be done with a transformer, kustomize vars don't seem to work.
|
||||
- Add kustomize examples for cloud-native ingress, load balancer
|
||||
18
docs/examples/kustomize/base/ingress.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: headscale
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: traefik
|
||||
spec:
|
||||
rules:
|
||||
- host: $(PUBLIC_HOSTNAME)
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
service:
|
||||
name: headscale
|
||||
port:
|
||||
number: 8080
|
||||
path: /
|
||||
pathType: Prefix
|
||||
42
docs/examples/kustomize/base/kustomization.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
namespace: headscale
|
||||
resources:
|
||||
- configmap.yaml
|
||||
- ingress.yaml
|
||||
- service.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
configMapGenerator:
|
||||
- name: headscale-site
|
||||
files:
|
||||
- derp.yaml=site/derp.yaml
|
||||
envs:
|
||||
- site/public.env
|
||||
- name: headscale-etc
|
||||
literals:
|
||||
- config.json={}
|
||||
secretGenerator:
|
||||
- name: headscale
|
||||
files:
|
||||
- secrets/private-key
|
||||
vars:
|
||||
- name: PUBLIC_PROTO
|
||||
objRef:
|
||||
kind: ConfigMap
|
||||
name: headscale-site
|
||||
apiVersion: v1
|
||||
fieldRef:
|
||||
fieldPath: data.public-proto
|
||||
- name: PUBLIC_HOSTNAME
|
||||
objRef:
|
||||
kind: ConfigMap
|
||||
name: headscale-site
|
||||
apiVersion: v1
|
||||
fieldRef:
|
||||
fieldPath: data.public-hostname
|
||||
- name: CONTACT_EMAIL
|
||||
objRef:
|
||||
kind: ConfigMap
|
||||
name: headscale-site
|
||||
apiVersion: v1
|
||||
fieldRef:
|
||||
fieldPath: data.contact-email
|
||||
@@ -8,6 +8,6 @@ spec:
|
||||
selector:
|
||||
app: headscale
|
||||
ports:
|
||||
- name: http
|
||||
targetPort: http
|
||||
port: 8080
|
||||
- name: http
|
||||
targetPort: http
|
||||
port: 8080
|
||||
76
docs/examples/kustomize/postgres/deployment.yaml
Normal file
@@ -0,0 +1,76 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: headscale
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: headscale
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: headscale
|
||||
spec:
|
||||
containers:
|
||||
- name: headscale
|
||||
image: "headscale:latest"
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/go/bin/headscale", "serve"]
|
||||
env:
|
||||
- name: SERVER_URL
|
||||
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
|
||||
- name: LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: ephemeral_node_inactivity_timeout
|
||||
- name: DB_TYPE
|
||||
value: postgres
|
||||
- name: DB_HOST
|
||||
value: postgres.headscale.svc.cluster.local
|
||||
- name: DB_PORT
|
||||
value: "5432"
|
||||
- name: DB_USER
|
||||
value: headscale
|
||||
- name: DB_PASS
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgresql
|
||||
key: password
|
||||
- name: DB_NAME
|
||||
value: headscale
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
containerPort: 8080
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /vol/config
|
||||
- name: secret
|
||||
mountPath: /vol/secret
|
||||
- name: etc
|
||||
mountPath: /etc/headscale
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: headscale-site
|
||||
- name: etc
|
||||
configMap:
|
||||
name: headscale-etc
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: headscale
|
||||
13
docs/examples/kustomize/postgres/kustomization.yaml
Normal file
@@ -0,0 +1,13 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- deployment.yaml
|
||||
- postgres-service.yaml
|
||||
- postgres-statefulset.yaml
|
||||
generatorOptions:
|
||||
disableNameSuffixHash: true
|
||||
secretGenerator:
|
||||
- name: postgresql
|
||||
files:
|
||||
- secrets/password
|
||||
@@ -8,6 +8,6 @@ spec:
|
||||
selector:
|
||||
app: postgres
|
||||
ports:
|
||||
- name: postgres
|
||||
targetPort: postgres
|
||||
port: 5432
|
||||
- name: postgres
|
||||
targetPort: postgres
|
||||
port: 5432
|
||||
49
docs/examples/kustomize/postgres/postgres-statefulset.yaml
Normal file
@@ -0,0 +1,49 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
serviceName: postgres
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
containers:
|
||||
- name: postgres
|
||||
image: "postgres:13"
|
||||
imagePullPolicy: IfNotPresent
|
||||
env:
|
||||
- name: POSTGRES_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: postgresql
|
||||
key: password
|
||||
- name: POSTGRES_USER
|
||||
value: headscale
|
||||
ports:
|
||||
- name: postgres
|
||||
protocol: TCP
|
||||
containerPort: 5432
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: 5432
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: pgdata
|
||||
mountPath: /var/lib/postgresql/data
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: pgdata
|
||||
spec:
|
||||
storageClassName: local-path
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -6,6 +6,6 @@ metadata:
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: production-cert
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: production-cert
|
||||
@@ -0,0 +1,9 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- production-issuer.yaml
|
||||
patches:
|
||||
- path: ingress-patch.yaml
|
||||
target:
|
||||
kind: Ingress
|
||||
@@ -11,6 +11,6 @@ spec:
|
||||
# Secret resource used to store the account's private key.
|
||||
name: letsencrypt-production-acc-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
@@ -1,5 +1,5 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
- ../base
|
||||
resources:
|
||||
- statefulset.yaml
|
||||
- statefulset.yaml
|
||||
77
docs/examples/kustomize/sqlite/statefulset.yaml
Normal file
@@ -0,0 +1,77 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: headscale
|
||||
spec:
|
||||
serviceName: headscale
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: headscale
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: headscale
|
||||
spec:
|
||||
containers:
|
||||
- name: headscale
|
||||
image: "headscale:latest"
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/go/bin/headscale", "serve"]
|
||||
env:
|
||||
- name: SERVER_URL
|
||||
value: $(PUBLIC_PROTO)://$(PUBLIC_HOSTNAME)
|
||||
- name: LISTEN_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: listen_addr
|
||||
- name: DERP_MAP_PATH
|
||||
value: /vol/config/derp.yaml
|
||||
- name: EPHEMERAL_NODE_INACTIVITY_TIMEOUT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: headscale-config
|
||||
key: ephemeral_node_inactivity_timeout
|
||||
- name: DB_TYPE
|
||||
value: sqlite3
|
||||
- name: DB_PATH
|
||||
value: /vol/data/db.sqlite
|
||||
ports:
|
||||
- name: http
|
||||
protocol: TCP
|
||||
containerPort: 8080
|
||||
livenessProbe:
|
||||
tcpSocket:
|
||||
port: http
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
periodSeconds: 15
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /vol/config
|
||||
- name: data
|
||||
mountPath: /vol/data
|
||||
- name: secret
|
||||
mountPath: /vol/secret
|
||||
- name: etc
|
||||
mountPath: /etc/headscale
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: headscale-site
|
||||
- name: etc
|
||||
configMap:
|
||||
name: headscale-etc
|
||||
- name: secret
|
||||
secret:
|
||||
secretName: headscale
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: data
|
||||
spec:
|
||||
storageClassName: local-path
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
@@ -6,6 +6,6 @@ metadata:
|
||||
traefik.ingress.kubernetes.io/router.tls: "true"
|
||||
spec:
|
||||
tls:
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: staging-cert
|
||||
- hosts:
|
||||
- $(PUBLIC_HOSTNAME)
|
||||
secretName: staging-cert
|
||||
9
docs/examples/kustomize/staging-tls/kustomization.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
namespace: headscale
|
||||
bases:
|
||||
- ../base
|
||||
resources:
|
||||
- staging-issuer.yaml
|
||||
patches:
|
||||
- path: ingress-patch.yaml
|
||||
target:
|
||||
kind: Ingress
|
||||
@@ -11,6 +11,6 @@ spec:
|
||||
# Secret resource used to store the account's private key.
|
||||
name: letsencrypt-staging-acc-key
|
||||
solvers:
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
- http01:
|
||||
ingress:
|
||||
class: traefik
|
||||
3
docs/glossary.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Glossary
|
||||
|
||||
- Namespace: Collection of Tailscale nodes that can see each other. In Tailscale.com this is called Tailnet.
|
||||
148
docs/running-headscale-container.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# Running headscale in a container
|
||||
|
||||
**Note:** the container documentation is maintained by the _community_ and there is no guarantee
|
||||
it is up to date, or working.
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to set up and run `headscale` in a container.
|
||||
[Docker](https://www.docker.com) is used as the reference container implementation, but there is no reason that it should
|
||||
not work with alternatives like [Podman](https://podman.io). The Docker image can be found on Docker Hub [here](https://hub.docker.com/r/headscale/headscale).
|
||||
|
||||
## Configure and run `headscale`
|
||||
|
||||
1. Prepare a directory of your choice on the Docker host, used to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir ./headscale && cd ./headscale
|
||||
mkdir ./config
|
||||
```
|
||||
|
||||
2. Create an empty SQLite database in the headscale directory:
|
||||
|
||||
```shell
|
||||
touch ./config/db.sqlite
|
||||
```
|
||||
|
||||
3. **(Strongly Recommended)** Download a copy of the [example configuration](../config-example.yaml) from the [headscale repository](https://github.com/juanfont/headscale/).
|
||||
|
||||
Using wget:
|
||||
|
||||
```shell
|
||||
wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
|
||||
```
|
||||
|
||||
Using curl:
|
||||
|
||||
```shell
|
||||
curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml
|
||||
```
|
||||
|
||||
**(Advanced)** If you would like to hand craft a config file **instead** of downloading the example config file, create a blank `headscale` configuration in the headscale directory to edit:
|
||||
|
||||
```shell
|
||||
touch ./config/config.yaml
|
||||
```
|
||||
|
||||
Modify the config file to your preferences before launching the Docker container.
|
||||
|
||||
4. Start the headscale server while working in the host headscale directory:
|
||||
|
||||
```shell
|
||||
docker run \
|
||||
--name headscale \
|
||||
--detach \
|
||||
--rm \
|
||||
--volume $(pwd)/config:/etc/headscale/ \
|
||||
--publish 127.0.0.1:8080:8080 \
|
||||
headscale/headscale:<VERSION> \
|
||||
headscale serve
|
||||
|
||||
```
|
||||
|
||||
This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
|
||||
`headscale` instance becomes available, and then detach so headscale runs in the background.
|
||||
|
||||
5. Verify `headscale` is running:
|
||||
|
||||
Follow the container logs:
|
||||
|
||||
```shell
|
||||
docker logs --follow headscale
|
||||
```
|
||||
|
||||
Verify running containers:
|
||||
|
||||
```shell
|
||||
docker ps
|
||||
```
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:8080/metrics
|
||||
```
|
||||
|
||||
6. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
|
||||
|
||||
```shell
|
||||
docker exec headscale -- headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
To register a machine when running `headscale` in a container, take the headscale command and pass it to the container:
|
||||
|
||||
```shell
|
||||
docker exec headscale -- \
|
||||
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register a machine using a pre-authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
|
||||
```shell
|
||||
docker exec headscale -- \
|
||||
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to `headscale` with the `tailscale up` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
## Debugging headscale running in Docker
|
||||
|
||||
The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug headscale running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.
|
||||
|
||||
### Running the debug Docker container
|
||||
|
||||
To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
|
||||
|
||||
### Executing commands in the debug container
|
||||
|
||||
The default command in the debug container is to run `headscale`, which is located at `/bin/headscale` inside the container.
|
||||
|
||||
Additionally, the debug container includes a minimalist Busybox shell.
|
||||
|
||||
To launch a shell in the container, use:
|
||||
|
||||
```
|
||||
docker run -it headscale/headscale:x.x.x-debug sh
|
||||
```
|
||||
|
||||
You can also execute commands directly, such as `ls /bin` in this example:
|
||||
|
||||
```
|
||||
docker run headscale/headscale:x.x.x-debug ls /bin
|
||||
```
|
||||
|
||||
Using `docker exec` allows you to run commands in an existing container.
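For example, assuming the container was started with `--name headscale` as shown above, a `headscale` CLI command can be run inside the already-running container like this (the `nodes list` subcommand is used here purely as an illustration):

```shell
docker exec -it headscale headscale nodes list
```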
|
||||
172
docs/running-headscale-linux.md
Normal file
@@ -0,0 +1,172 @@
|
||||
# Running headscale on Linux
|
||||
|
||||
## Goal
|
||||
|
||||
This documentation has the goal of showing a user how to set up and run `headscale` on Linux.
|
||||
In additional to the "get up and running section", there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd)
|
||||
describing how to make `headscale` run properly in a server environment.
|
||||
|
||||
## Configure and run `headscale`
|
||||
|
||||
1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):
|
||||
|
||||
```shell
|
||||
wget --output-document=/usr/local/bin/headscale \
|
||||
https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
|
||||
```
|
||||
|
||||
2. Make `headscale` executable:
|
||||
|
||||
```shell
|
||||
chmod +x /usr/local/bin/headscale
|
||||
```
|
||||
|
||||
3. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
# Directory for configuration
|
||||
|
||||
mkdir -p /etc/headscale
|
||||
|
||||
# Directory for Database, and other variable data (like certificates)
|
||||
mkdir -p /var/lib/headscale
|
||||
```
|
||||
|
||||
4. Create an empty SQLite database:
|
||||
|
||||
```shell
|
||||
touch /var/lib/headscale/db.sqlite
|
||||
```
|
||||
|
||||
5. Create a `headscale` configuration:
|
||||
|
||||
```shell
|
||||
touch /etc/headscale/config.yaml
|
||||
```
|
||||
|
||||
It is **strongly recommended** to copy and modify the [example configuration](../config-example.yaml)
|
||||
from the [headscale repository](../).
|
||||
|
||||
6. Start the headscale server:
|
||||
|
||||
```shell
|
||||
headscale serve
|
||||
```
|
||||
|
||||
This command will start `headscale` in the current terminal session.
|
||||
|
||||
---
|
||||
|
||||
To continue the tutorial, open a new terminal and let it run in the background.
|
||||
Alternatively, use a terminal multiplexer like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/).
|
||||
|
||||
To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing.
|
||||
|
||||
7. Verify `headscale` is running:
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:8080/metrics
|
||||
```
|
||||
|
||||
8. Create a namespace ([tailnet](https://tailscale.com/kb/1136/tailnet/)):
|
||||
|
||||
```shell
|
||||
headscale namespaces create myfirstnamespace
|
||||
```
|
||||
|
||||
### Register a machine (normal login)
|
||||
|
||||
On a client machine, execute the `tailscale` login command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server YOUR_HEADSCALE_URL
|
||||
```
|
||||
|
||||
Register the machine:
|
||||
|
||||
```shell
|
||||
headscale --namespace myfirstnamespace nodes register --key <YOUR_MACHINE_KEY>
|
||||
```
|
||||
|
||||
### Register a machine using a pre-authenticated key
|
||||
|
||||
Generate a key using the command line:
|
||||
|
||||
```shell
|
||||
headscale --namespace myfirstnamespace preauthkeys create --reusable --expiration 24h
|
||||
```
|
||||
|
||||
This will return a pre-authenticated key that can be used to connect a node to `headscale` with the `tailscale up` command:
|
||||
|
||||
```shell
|
||||
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
|
||||
```
|
||||
|
||||
## Running `headscale` in the background with SystemD
|
||||
|
||||
This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/).
|
||||
This should work on most modern Linux distributions.
|
||||
|
||||
1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing:
|
||||
|
||||
```systemd
|
||||
[Unit]
|
||||
Description=headscale controller
|
||||
After=syslog.target
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=headscale
|
||||
Group=headscale
|
||||
ExecStart=/usr/local/bin/headscale serve
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
# Optional security enhancements
|
||||
NoNewPrivileges=yes
|
||||
PrivateTmp=yes
|
||||
ProtectSystem=strict
|
||||
ProtectHome=yes
|
||||
ReadWritePaths=/var/lib/headscale /var/run/headscale
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
RuntimeDirectory=headscale
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
2. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a SystemD friendly path:
|
||||
|
||||
```yaml
|
||||
unix_socket: /var/run/headscale/headscale.sock
|
||||
```
|
||||
|
||||
3. Reload SystemD to load the new configuration file:
|
||||
|
||||
```shell
|
||||
systemctl daemon-reload
|
||||
```
|
||||
|
||||
4. Enable and start the new `headscale` service:
|
||||
|
||||
```shell
|
||||
systemctl enable headscale
|
||||
systemctl start headscale
|
||||
```
|
||||
|
||||
5. Verify the headscale service:
|
||||
|
||||
```shell
|
||||
systemctl status headscale
|
||||
```
|
||||
|
||||
Verify `headscale` is available:
|
||||
|
||||
```shell
|
||||
curl http://127.0.0.1:8080/metrics
|
||||
```
|
||||
|
||||
`headscale` will now run in the background and start at boot.
|
||||
31
docs/tls.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# Running the service via TLS (optional)
|
||||
|
||||
## Let's Encrypt / ACME
|
||||
|
||||
To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from. The certificate will automatically be renewed as needed.
|
||||
|
||||
```yaml
|
||||
tls_letsencrypt_hostname: ""
|
||||
tls_letsencrypt_listen: ":http"
|
||||
tls_letsencrypt_cache_dir: ".cache"
|
||||
tls_letsencrypt_challenge_type: HTTP-01
|
||||
```
|
||||
|
||||
### Challenge type HTTP-01
|
||||
|
||||
The default challenge type `HTTP-01` requires that headscale is reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.
|
||||
|
||||
If you need to change the ip and/or port used by headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.
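As a minimal sketch of that setup (the port `8081` and the firewall rule below are illustrative choices, not project recommendations):

```yaml
# Listen for the HTTP-01 validation callback on an unprivileged port
tls_letsencrypt_listen: ":8081"
```

Something on the host then has to forward port 80 to that port; on Linux one possible way is an iptables REDIRECT rule such as `iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-port 8081`.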
|
||||
|
||||
### Challenge type TLS-ALPN-01
|
||||
|
||||
Alternatively, `tls_letsencrypt_challenge_type` can be set to `TLS-ALPN-01`. In this configuration, headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.
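A minimal sketch of the simplest variant, where headscale itself listens directly on port 443 (the hostname is a placeholder and the values must match the rest of your configuration):

```yaml
server_url: https://headscale.example.com
listen_addr: 0.0.0.0:443
tls_letsencrypt_hostname: headscale.example.com
tls_letsencrypt_challenge_type: TLS-ALPN-01
```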
|
||||
|
||||
## Bring your own certificate
|
||||
|
||||
headscale can also be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.
|
||||
|
||||
```yaml
|
||||
tls_cert_path: ""
|
||||
tls_key_path: ""
|
||||
```
|
||||
1115
gen/go/headscale/v1/device.pb.go
Normal file
File diff suppressed because it is too large
303
gen/go/headscale/v1/headscale.pb.go
Normal file
@@ -0,0 +1,303 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.17.3
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
|
||||
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65,
|
||||
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xf4,
|
||||
0x12, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x12, 0x77, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x1a, 0x12, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x7c, 0x0a, 0x0f,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
|
||||
0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x16, 0x22, 0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x96, 0x01, 0x0a, 0x0f, 0x52,
|
||||
0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x24,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x30, 0x22, 0x2e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x7d, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x2a, 0x18, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f,
|
||||
0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x76, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61,
|
||||
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x12, 0x11, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x80,
|
||||
0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x3a, 0x01,
|
||||
0x2a, 0x12, 0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x22, 0x19, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65,
|
||||
0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x7a, 0x0a, 0x0f, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65,
|
||||
0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x89, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x62, 0x75,
|
||||
0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x27,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65,
|
||||
0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x3a, 0x01, 0x2a, 0x12, 0x75, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x24,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
|
||||
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x12, 0x7e, 0x0a,
|
||||
0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x2a,
|
||||
0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x85, 0x01,
|
||||
0x0a, 0x0d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25,
|
||||
0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x8d, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x65, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x30, 0x22, 0x2e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x7d, 0x12, 0x95, 0x01, 0x0a, 0x0e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72,
|
||||
0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x6e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x6e, 0x73,
|
||||
0x68, 0x61, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x22, 0x30, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75, 0x6e, 0x73, 0x68, 0x61, 0x72,
|
||||
0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x12, 0x8b, 0x01,
|
||||
0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x97, 0x01, 0x0a, 0x13,
|
||||
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25,
|
||||
0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
|
||||
0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31,
|
||||
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_headscale_v1_headscale_proto_goTypes = []interface{}{
|
||||
(*GetNamespaceRequest)(nil), // 0: headscale.v1.GetNamespaceRequest
|
||||
(*CreateNamespaceRequest)(nil), // 1: headscale.v1.CreateNamespaceRequest
|
||||
(*RenameNamespaceRequest)(nil), // 2: headscale.v1.RenameNamespaceRequest
|
||||
(*DeleteNamespaceRequest)(nil), // 3: headscale.v1.DeleteNamespaceRequest
|
||||
(*ListNamespacesRequest)(nil), // 4: headscale.v1.ListNamespacesRequest
|
||||
(*CreatePreAuthKeyRequest)(nil), // 5: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 6: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
|
||||
(*DebugCreateMachineRequest)(nil), // 8: headscale.v1.DebugCreateMachineRequest
|
||||
(*GetMachineRequest)(nil), // 9: headscale.v1.GetMachineRequest
|
||||
(*RegisterMachineRequest)(nil), // 10: headscale.v1.RegisterMachineRequest
|
||||
(*DeleteMachineRequest)(nil), // 11: headscale.v1.DeleteMachineRequest
|
||||
(*ExpireMachineRequest)(nil), // 12: headscale.v1.ExpireMachineRequest
|
||||
(*ListMachinesRequest)(nil), // 13: headscale.v1.ListMachinesRequest
|
||||
(*ShareMachineRequest)(nil), // 14: headscale.v1.ShareMachineRequest
|
||||
(*UnshareMachineRequest)(nil), // 15: headscale.v1.UnshareMachineRequest
|
||||
(*GetMachineRouteRequest)(nil), // 16: headscale.v1.GetMachineRouteRequest
|
||||
(*EnableMachineRoutesRequest)(nil), // 17: headscale.v1.EnableMachineRoutesRequest
|
||||
(*GetNamespaceResponse)(nil), // 18: headscale.v1.GetNamespaceResponse
|
||||
(*CreateNamespaceResponse)(nil), // 19: headscale.v1.CreateNamespaceResponse
|
||||
(*RenameNamespaceResponse)(nil), // 20: headscale.v1.RenameNamespaceResponse
|
||||
(*DeleteNamespaceResponse)(nil), // 21: headscale.v1.DeleteNamespaceResponse
|
||||
(*ListNamespacesResponse)(nil), // 22: headscale.v1.ListNamespacesResponse
|
||||
(*CreatePreAuthKeyResponse)(nil), // 23: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 24: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysResponse)(nil), // 25: headscale.v1.ListPreAuthKeysResponse
|
||||
(*DebugCreateMachineResponse)(nil), // 26: headscale.v1.DebugCreateMachineResponse
|
||||
(*GetMachineResponse)(nil), // 27: headscale.v1.GetMachineResponse
|
||||
(*RegisterMachineResponse)(nil), // 28: headscale.v1.RegisterMachineResponse
|
||||
(*DeleteMachineResponse)(nil), // 29: headscale.v1.DeleteMachineResponse
|
||||
(*ExpireMachineResponse)(nil), // 30: headscale.v1.ExpireMachineResponse
|
||||
(*ListMachinesResponse)(nil), // 31: headscale.v1.ListMachinesResponse
|
||||
(*ShareMachineResponse)(nil), // 32: headscale.v1.ShareMachineResponse
|
||||
(*UnshareMachineResponse)(nil), // 33: headscale.v1.UnshareMachineResponse
|
||||
(*GetMachineRouteResponse)(nil), // 34: headscale.v1.GetMachineRouteResponse
|
||||
(*EnableMachineRoutesResponse)(nil), // 35: headscale.v1.EnableMachineRoutesResponse
|
||||
}
|
||||
var file_headscale_v1_headscale_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.HeadscaleService.GetNamespace:input_type -> headscale.v1.GetNamespaceRequest
|
||||
1, // 1: headscale.v1.HeadscaleService.CreateNamespace:input_type -> headscale.v1.CreateNamespaceRequest
|
||||
2, // 2: headscale.v1.HeadscaleService.RenameNamespace:input_type -> headscale.v1.RenameNamespaceRequest
|
||||
3, // 3: headscale.v1.HeadscaleService.DeleteNamespace:input_type -> headscale.v1.DeleteNamespaceRequest
|
||||
4, // 4: headscale.v1.HeadscaleService.ListNamespaces:input_type -> headscale.v1.ListNamespacesRequest
|
||||
5, // 5: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
|
||||
6, // 6: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
|
||||
7, // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
|
||||
8, // 8: headscale.v1.HeadscaleService.DebugCreateMachine:input_type -> headscale.v1.DebugCreateMachineRequest
|
||||
9, // 9: headscale.v1.HeadscaleService.GetMachine:input_type -> headscale.v1.GetMachineRequest
|
||||
10, // 10: headscale.v1.HeadscaleService.RegisterMachine:input_type -> headscale.v1.RegisterMachineRequest
|
||||
11, // 11: headscale.v1.HeadscaleService.DeleteMachine:input_type -> headscale.v1.DeleteMachineRequest
|
||||
12, // 12: headscale.v1.HeadscaleService.ExpireMachine:input_type -> headscale.v1.ExpireMachineRequest
|
||||
13, // 13: headscale.v1.HeadscaleService.ListMachines:input_type -> headscale.v1.ListMachinesRequest
|
||||
14, // 14: headscale.v1.HeadscaleService.ShareMachine:input_type -> headscale.v1.ShareMachineRequest
|
||||
15, // 15: headscale.v1.HeadscaleService.UnshareMachine:input_type -> headscale.v1.UnshareMachineRequest
|
||||
16, // 16: headscale.v1.HeadscaleService.GetMachineRoute:input_type -> headscale.v1.GetMachineRouteRequest
|
||||
17, // 17: headscale.v1.HeadscaleService.EnableMachineRoutes:input_type -> headscale.v1.EnableMachineRoutesRequest
|
||||
18, // 18: headscale.v1.HeadscaleService.GetNamespace:output_type -> headscale.v1.GetNamespaceResponse
|
||||
19, // 19: headscale.v1.HeadscaleService.CreateNamespace:output_type -> headscale.v1.CreateNamespaceResponse
|
||||
20, // 20: headscale.v1.HeadscaleService.RenameNamespace:output_type -> headscale.v1.RenameNamespaceResponse
|
||||
21, // 21: headscale.v1.HeadscaleService.DeleteNamespace:output_type -> headscale.v1.DeleteNamespaceResponse
|
||||
22, // 22: headscale.v1.HeadscaleService.ListNamespaces:output_type -> headscale.v1.ListNamespacesResponse
|
||||
23, // 23: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
|
||||
24, // 24: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
|
||||
25, // 25: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
|
||||
26, // 26: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
|
||||
27, // 27: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
|
||||
28, // 28: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
|
||||
29, // 29: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
|
||||
30, // 30: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
|
||||
31, // 31: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
|
||||
32, // 32: headscale.v1.HeadscaleService.ShareMachine:output_type -> headscale.v1.ShareMachineResponse
|
||||
33, // 33: headscale.v1.HeadscaleService.UnshareMachine:output_type -> headscale.v1.UnshareMachineResponse
|
||||
34, // 34: headscale.v1.HeadscaleService.GetMachineRoute:output_type -> headscale.v1.GetMachineRouteResponse
|
||||
35, // 35: headscale.v1.HeadscaleService.EnableMachineRoutes:output_type -> headscale.v1.EnableMachineRoutesResponse
|
||||
18, // [18:36] is the sub-list for method output_type
|
||||
0, // [0:18] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_headscale_proto_init() }
|
||||
func file_headscale_v1_headscale_proto_init() {
|
||||
if File_headscale_v1_headscale_proto != nil {
|
||||
return
|
||||
}
|
||||
file_headscale_v1_namespace_proto_init()
|
||||
file_headscale_v1_preauthkey_proto_init()
|
||||
file_headscale_v1_machine_proto_init()
|
||||
file_headscale_v1_routes_proto_init()
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_headscale_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_headscale_v1_headscale_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
|
||||
}.Build()
|
||||
File_headscale_v1_headscale_proto = out.File
|
||||
file_headscale_v1_headscale_proto_rawDesc = nil
|
||||
file_headscale_v1_headscale_proto_goTypes = nil
|
||||
file_headscale_v1_headscale_proto_depIdxs = nil
|
||||
}
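The init wiring above registers the raw descriptor with the global protobuf registry. As a hedged illustration (not part of the generated file; it assumes google.golang.org/protobuf/reflect/protoregistry and protoreflect are imported), the registered service can then be looked up by its full name:

// lookupHeadscaleService is a hypothetical helper: after this package's init
// has run (for example via a blank import), the service descriptor can be
// resolved from the global protobuf file registry.
func lookupHeadscaleService() (protoreflect.ServiceDescriptor, error) {
	d, err := protoregistry.GlobalFiles.FindDescriptorByName("headscale.v1.HeadscaleService")
	if err != nil {
		return nil, err
	}
	return d.(protoreflect.ServiceDescriptor), nil
}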
|
||||
1792 gen/go/headscale/v1/headscale.pb.gw.go (new file, diff suppressed because it is too large)
721 gen/go/headscale/v1/headscale_grpc.pb.go (new file)
@@ -0,0 +1,721 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// HeadscaleServiceClient is the client API for HeadscaleService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type HeadscaleServiceClient interface {
|
||||
// --- Namespace start ---
|
||||
GetNamespace(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error)
|
||||
CreateNamespace(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error)
|
||||
RenameNamespace(ctx context.Context, in *RenameNamespaceRequest, opts ...grpc.CallOption) (*RenameNamespaceResponse, error)
|
||||
DeleteNamespace(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*DeleteNamespaceResponse, error)
|
||||
ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error)
|
||||
// --- PreAuthKeys start ---
|
||||
CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)
|
||||
// --- Machine start ---
|
||||
DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error)
|
||||
GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error)
|
||||
RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error)
|
||||
DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error)
|
||||
ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error)
|
||||
ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error)
|
||||
ShareMachine(ctx context.Context, in *ShareMachineRequest, opts ...grpc.CallOption) (*ShareMachineResponse, error)
|
||||
UnshareMachine(ctx context.Context, in *UnshareMachineRequest, opts ...grpc.CallOption) (*UnshareMachineResponse, error)
|
||||
// --- Route start ---
|
||||
GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error)
|
||||
EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error)
|
||||
}
|
||||
|
||||
type headscaleServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClient {
|
||||
return &headscaleServiceClient{cc}
|
||||
}
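A minimal usage sketch for the generated client (not part of the generated file): it dials a headscale gRPC endpoint and issues one unary call. The address and the plaintext transport are assumptions for the sketch; the import path follows the go_package option encoded in the descriptor above.

// client_sketch.go: hypothetical standalone caller of the generated client.
package main

import (
	"context"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/v1" // taken from the go_package option; assumption for this sketch
	"google.golang.org/grpc"
)

func main() {
	// Dial without TLS purely for illustration; a real deployment would use credentials.
	conn, err := grpc.Dial("localhost:50443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// ListNamespaces takes an empty request message.
	resp, err := client.ListNamespaces(context.Background(), &v1.ListNamespacesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("namespaces response: %v", resp)
}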
|
||||
|
||||
func (c *headscaleServiceClient) GetNamespace(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
|
||||
out := new(GetNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreateNamespace(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
|
||||
out := new(CreateNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreateNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RenameNamespace(ctx context.Context, in *RenameNamespaceRequest, opts ...grpc.CallOption) (*RenameNamespaceResponse, error) {
|
||||
out := new(RenameNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RenameNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteNamespace(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*DeleteNamespaceResponse, error) {
|
||||
out := new(DeleteNamespaceResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteNamespace", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListNamespaces(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
|
||||
out := new(ListNamespacesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListNamespaces", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
|
||||
out := new(CreatePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreatePreAuthKey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
|
||||
out := new(ExpirePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpirePreAuthKey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListPreAuthKeys", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error) {
|
||||
out := new(DebugCreateMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DebugCreateMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error) {
|
||||
out := new(GetMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error) {
|
||||
out := new(RegisterMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RegisterMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error) {
|
||||
out := new(DeleteMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error) {
|
||||
out := new(ExpireMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpireMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error) {
|
||||
out := new(ListMachinesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListMachines", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ShareMachine(ctx context.Context, in *ShareMachineRequest, opts ...grpc.CallOption) (*ShareMachineResponse, error) {
|
||||
out := new(ShareMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ShareMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) UnshareMachine(ctx context.Context, in *UnshareMachineRequest, opts ...grpc.CallOption) (*UnshareMachineResponse, error) {
|
||||
out := new(UnshareMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/UnshareMachine", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachineRoute(ctx context.Context, in *GetMachineRouteRequest, opts ...grpc.CallOption) (*GetMachineRouteResponse, error) {
|
||||
out := new(GetMachineRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachineRoute", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) EnableMachineRoutes(ctx context.Context, in *EnableMachineRoutesRequest, opts ...grpc.CallOption) (*EnableMachineRoutesResponse, error) {
|
||||
out := new(EnableMachineRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/EnableMachineRoutes", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// HeadscaleServiceServer is the server API for HeadscaleService service.
|
||||
// All implementations must embed UnimplementedHeadscaleServiceServer
|
||||
// for forward compatibility
|
||||
type HeadscaleServiceServer interface {
|
||||
// --- Namespace start ---
|
||||
GetNamespace(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error)
|
||||
CreateNamespace(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
|
||||
RenameNamespace(context.Context, *RenameNamespaceRequest) (*RenameNamespaceResponse, error)
|
||||
DeleteNamespace(context.Context, *DeleteNamespaceRequest) (*DeleteNamespaceResponse, error)
|
||||
ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error)
|
||||
// --- PreAuthKeys start ---
|
||||
CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)
|
||||
// --- Machine start ---
|
||||
DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error)
|
||||
GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error)
|
||||
RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error)
|
||||
DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error)
|
||||
ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error)
|
||||
ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error)
|
||||
ShareMachine(context.Context, *ShareMachineRequest) (*ShareMachineResponse, error)
|
||||
UnshareMachine(context.Context, *UnshareMachineRequest) (*UnshareMachineResponse, error)
|
||||
// --- Route start ---
|
||||
GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error)
|
||||
EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error)
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
// UnimplementedHeadscaleServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedHeadscaleServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedHeadscaleServiceServer) GetNamespace(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreateNamespace(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreateNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RenameNamespace(context.Context, *RenameNamespaceRequest) (*RenameNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteNamespace(context.Context, *DeleteNamespaceRequest) (*DeleteNamespaceResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteNamespace not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListNamespaces(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListNamespaces not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method CreatePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpirePreAuthKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DebugCreateMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListMachines not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ShareMachine(context.Context, *ShareMachineRequest) (*ShareMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ShareMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) UnshareMachine(context.Context, *UnshareMachineRequest) (*UnshareMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UnshareMachine not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachineRoute(context.Context, *GetMachineRouteRequest) (*GetMachineRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachineRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) EnableMachineRoutes(context.Context, *EnableMachineRoutesRequest) (*EnableMachineRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method EnableMachineRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
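A hedged sketch of how a concrete server could build on the embedding requirement above (written as if it lived in the same package; exampleServer and its placeholder response are assumptions, not part of the generated code):

// exampleServer satisfies HeadscaleServiceServer by embedding
// UnimplementedHeadscaleServiceServer; every method it does not override
// keeps returning codes.Unimplemented.
type exampleServer struct {
	UnimplementedHeadscaleServiceServer
}

// ListNamespaces is the only RPC overridden in this sketch; it returns an
// empty placeholder response.
func (s *exampleServer) ListNamespaces(ctx context.Context, req *ListNamespacesRequest) (*ListNamespacesResponse, error) {
	return &ListNamespacesResponse{}, nil
}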
|
||||
|
||||
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeHeadscaleServiceServer interface {
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
|
||||
s.RegisterService(&HeadscaleService_ServiceDesc, srv)
|
||||
}
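Continuing the sketch above, the service would typically be wired onto a grpc.Server roughly as follows (the listener address and the exampleServer type are assumptions; net is assumed to be imported):

// serveExample registers the sketched implementation and blocks serving gRPC.
func serveExample() error {
	lis, err := net.Listen("tcp", ":50443") // address is illustrative
	if err != nil {
		return err
	}
	grpcServer := grpc.NewServer()
	RegisterHeadscaleServiceServer(grpcServer, &exampleServer{})
	return grpcServer.Serve(lis)
}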
|
||||
|
||||
func _HeadscaleService_GetNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetNamespace(ctx, req.(*GetNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
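The handler above shows the interceptor contract: when a grpc.UnaryServerInterceptor is configured, it receives the decoded request, the method info and a handler closure. A hedged sketch of such an interceptor (the names are hypothetical; log is assumed to be imported):

// loggingInterceptor logs every unary call before delegating to the wrapped
// handler, matching the contract used by the generated handlers.
func loggingInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	log.Printf("gRPC call: %s", info.FullMethod)
	return handler(ctx, req)
}

// It would be installed when constructing the server, for example:
//	grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))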
|
||||
|
||||
func _HeadscaleService_CreateNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreateNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).CreateNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreateNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreateNamespace(ctx, req.(*CreateNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RenameNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RenameNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).RenameNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RenameNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RenameNamespace(ctx, req.(*RenameNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteNamespace_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteNamespaceRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteNamespace(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteNamespace",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteNamespace(ctx, req.(*DeleteNamespaceRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListNamespaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListNamespacesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListNamespaces(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListNamespaces",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListNamespaces(ctx, req.(*ListNamespacesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_CreatePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CreatePreAuthKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreatePreAuthKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, req.(*CreatePreAuthKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpirePreAuthKeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpirePreAuthKey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, req.(*ExpirePreAuthKeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListPreAuthKeysRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListPreAuthKeys",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, req.(*ListPreAuthKeysRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DebugCreateMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DebugCreateMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DebugCreateMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DebugCreateMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DebugCreateMachine(ctx, req.(*DebugCreateMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachine(ctx, req.(*GetMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RegisterMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).RegisterMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RegisterMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RegisterMachine(ctx, req.(*RegisterMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteMachine(ctx, req.(*DeleteMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ExpireMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpireMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ExpireMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpireMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpireMachine(ctx, req.(*ExpireMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListMachines_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListMachinesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListMachines(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListMachines",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListMachines(ctx, req.(*ListMachinesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ShareMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ShareMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ShareMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ShareMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ShareMachine(ctx, req.(*ShareMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_UnshareMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UnshareMachineRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).UnshareMachine(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/UnshareMachine",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).UnshareMachine(ctx, req.(*UnshareMachineRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachineRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRouteRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoute(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachineRoute",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoute(ctx, req.(*GetMachineRouteRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_EnableMachineRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(EnableMachineRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).EnableMachineRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/EnableMachineRoutes",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).EnableMachineRoutes(ctx, req.(*EnableMachineRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "headscale.v1.HeadscaleService",
|
||||
HandlerType: (*HeadscaleServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "GetNamespace",
|
||||
Handler: _HeadscaleService_GetNamespace_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreateNamespace",
|
||||
Handler: _HeadscaleService_CreateNamespace_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RenameNamespace",
|
||||
Handler: _HeadscaleService_RenameNamespace_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteNamespace",
|
||||
Handler: _HeadscaleService_DeleteNamespace_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListNamespaces",
|
||||
Handler: _HeadscaleService_ListNamespaces_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "CreatePreAuthKey",
|
||||
Handler: _HeadscaleService_CreatePreAuthKey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ExpirePreAuthKey",
|
||||
Handler: _HeadscaleService_ExpirePreAuthKey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListPreAuthKeys",
|
||||
Handler: _HeadscaleService_ListPreAuthKeys_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DebugCreateMachine",
|
||||
Handler: _HeadscaleService_DebugCreateMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetMachine",
|
||||
Handler: _HeadscaleService_GetMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RegisterMachine",
|
||||
Handler: _HeadscaleService_RegisterMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteMachine",
|
||||
Handler: _HeadscaleService_DeleteMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ExpireMachine",
|
||||
Handler: _HeadscaleService_ExpireMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListMachines",
|
||||
Handler: _HeadscaleService_ListMachines_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ShareMachine",
|
||||
Handler: _HeadscaleService_ShareMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UnshareMachine",
|
||||
Handler: _HeadscaleService_UnshareMachine_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetMachineRoute",
|
||||
Handler: _HeadscaleService_GetMachineRoute_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "EnableMachineRoutes",
|
||||
Handler: _HeadscaleService_EnableMachineRoutes_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "headscale/v1/headscale.proto",
|
||||
}
|
||||
1445 gen/go/headscale/v1/machine.pb.go (new file, diff suppressed because it is too large)
801 gen/go/headscale/v1/namespace.pb.go (new file)
@@ -0,0 +1,801 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.17.3
|
||||
// source: headscale/v1/namespace.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Namespace struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Namespace) Reset() {
|
||||
*x = Namespace{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Namespace) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Namespace) ProtoMessage() {}
|
||||
|
||||
func (x *Namespace) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Namespace.ProtoReflect.Descriptor instead.
|
||||
func (*Namespace) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Namespace) GetId() string {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Namespace) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Namespace) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNamespaceRequest) Reset() {
|
||||
*x = GetNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetNamespaceResponse) Reset() {
|
||||
*x = GetNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetNamespaceResponse) GetNamespace() *Namespace {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreateNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceRequest) Reset() {
|
||||
*x = CreateNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreateNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type CreateNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceResponse) Reset() {
|
||||
*x = CreateNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CreateNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *CreateNamespaceResponse) GetNamespace() *Namespace {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type RenameNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
OldName string `protobuf:"bytes,1,opt,name=old_name,json=oldName,proto3" json:"old_name,omitempty"`
|
||||
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) Reset() {
|
||||
*x = RenameNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RenameNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *RenameNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RenameNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*RenameNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) GetOldName() string {
|
||||
if x != nil {
|
||||
return x.OldName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceRequest) GetNewName() string {
|
||||
if x != nil {
|
||||
return x.NewName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RenameNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceResponse) Reset() {
|
||||
*x = RenameNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*RenameNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *RenameNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use RenameNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*RenameNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *RenameNamespaceResponse) GetNamespace() *Namespace {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeleteNamespaceRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceRequest) Reset() {
|
||||
*x = DeleteNamespaceRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteNamespaceRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteNamespaceRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteNamespaceRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DeleteNamespaceResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceResponse) Reset() {
|
||||
*x = DeleteNamespaceResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteNamespaceResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteNamespaceResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteNamespaceResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteNamespaceResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteNamespaceResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
type ListNamespacesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ListNamespacesRequest) Reset() {
|
||||
*x = ListNamespacesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[9]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListNamespacesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListNamespacesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListNamespacesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[9]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListNamespacesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListNamespacesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{9}
|
||||
}
|
||||
|
||||
type ListNamespacesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespaces []*Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListNamespacesResponse) Reset() {
|
||||
*x = ListNamespacesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[10]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListNamespacesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListNamespacesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListNamespacesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_namespace_proto_msgTypes[10]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListNamespacesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListNamespacesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_namespace_proto_rawDescGZIP(), []int{10}
|
||||
}
|
||||
|
||||
func (x *ListNamespacesResponse) GetNamespaces() []*Namespace {
|
||||
if x != nil {
|
||||
return x.Namespaces
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_namespace_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_namespace_proto_rawDesc = []byte{
|
||||
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a, 0x0a,
|
||||
0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39,
|
||||
0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09,
|
||||
0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74,
|
||||
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x22, 0x50, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
|
||||
0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x22, 0x4e, 0x0a, 0x16, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a,
|
||||
0x08, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x07, 0x6f, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x77, 0x5f,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x22, 0x50, 0x0a, 0x17, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0b, 0x32, 0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x22, 0x19, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d,
|
||||
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17,
|
||||
0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x51, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x12, 0x37, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18,
|
||||
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
|
||||
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e,
|
||||
0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f,
|
||||
0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_namespace_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_namespace_proto_rawDescData = file_headscale_v1_namespace_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_namespace_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_namespace_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_namespace_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_namespace_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
|
||||
var file_headscale_v1_namespace_proto_goTypes = []interface{}{
|
||||
(*Namespace)(nil), // 0: headscale.v1.Namespace
|
||||
(*GetNamespaceRequest)(nil), // 1: headscale.v1.GetNamespaceRequest
|
||||
(*GetNamespaceResponse)(nil), // 2: headscale.v1.GetNamespaceResponse
|
||||
(*CreateNamespaceRequest)(nil), // 3: headscale.v1.CreateNamespaceRequest
|
||||
(*CreateNamespaceResponse)(nil), // 4: headscale.v1.CreateNamespaceResponse
|
||||
(*RenameNamespaceRequest)(nil), // 5: headscale.v1.RenameNamespaceRequest
|
||||
(*RenameNamespaceResponse)(nil), // 6: headscale.v1.RenameNamespaceResponse
|
||||
(*DeleteNamespaceRequest)(nil), // 7: headscale.v1.DeleteNamespaceRequest
|
||||
(*DeleteNamespaceResponse)(nil), // 8: headscale.v1.DeleteNamespaceResponse
|
||||
(*ListNamespacesRequest)(nil), // 9: headscale.v1.ListNamespacesRequest
|
||||
(*ListNamespacesResponse)(nil), // 10: headscale.v1.ListNamespacesResponse
|
||||
(*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_namespace_proto_depIdxs = []int32{
|
||||
11, // 0: headscale.v1.Namespace.created_at:type_name -> google.protobuf.Timestamp
|
||||
0, // 1: headscale.v1.GetNamespaceResponse.namespace:type_name -> headscale.v1.Namespace
|
||||
0, // 2: headscale.v1.CreateNamespaceResponse.namespace:type_name -> headscale.v1.Namespace
|
||||
0, // 3: headscale.v1.RenameNamespaceResponse.namespace:type_name -> headscale.v1.Namespace
|
||||
0, // 4: headscale.v1.ListNamespacesResponse.namespaces:type_name -> headscale.v1.Namespace
|
||||
5, // [5:5] is the sub-list for method output_type
|
||||
5, // [5:5] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_namespace_proto_init() }
|
||||
func file_headscale_v1_namespace_proto_init() {
|
||||
if File_headscale_v1_namespace_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Namespace); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreateNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreateNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RenameNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*RenameNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteNamespaceRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteNamespaceResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListNamespacesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_namespace_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListNamespacesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_namespace_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 11,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_namespace_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_namespace_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_namespace_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_namespace_proto = out.File
|
||||
file_headscale_v1_namespace_proto_rawDesc = nil
|
||||
file_headscale_v1_namespace_proto_goTypes = nil
|
||||
file_headscale_v1_namespace_proto_depIdxs = nil
|
||||
}
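The request/response messages above are what the generated HeadscaleService client stubs exchange. As a rough illustration (the connection target and insecure credentials are assumptions for the sketch, not part of this file), listing namespaces over gRPC could look like this:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Assumed local endpoint; headscale's gRPC address comes from its config in practice.
	conn, err := grpc.DialContext(ctx, "127.0.0.1:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// ListNamespacesRequest has no fields; the response carries []*v1.Namespace.
	resp, err := client.ListNamespaces(ctx, &v1.ListNamespacesRequest{})
	if err != nil {
		log.Fatalf("ListNamespaces: %v", err)
	}
	for _, ns := range resp.GetNamespaces() {
		fmt.Printf("%s\t%s\t%s\n", ns.GetId(), ns.GetName(),
			ns.GetCreatedAt().AsTime().Format(time.RFC3339))
	}
}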
gen/go/headscale/v1/preauthkey.pb.go (new file, 640 lines)
@@ -0,0 +1,640 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.17.3
|
||||
// source: headscale/v1/preauthkey.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type PreAuthKey struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) Reset() {
|
||||
*x = PreAuthKey{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PreAuthKey) ProtoMessage() {}
|
||||
|
||||
func (x *PreAuthKey) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PreAuthKey.ProtoReflect.Descriptor instead.
|
||||
func (*PreAuthKey) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetId() string {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetKey() string {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetReusable() bool {
|
||||
if x != nil {
|
||||
return x.Reusable
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetEphemeral() bool {
|
||||
if x != nil {
|
||||
return x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetUsed() bool {
|
||||
if x != nil {
|
||||
return x.Used
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetExpiration() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *PreAuthKey) GetCreatedAt() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedAt
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) Reset() {
|
||||
*x = CreatePreAuthKeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreatePreAuthKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreatePreAuthKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetReusable() bool {
|
||||
if x != nil {
|
||||
return x.Reusable
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetEphemeral() bool {
|
||||
if x != nil {
|
||||
return x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyRequest) GetExpiration() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.Expiration
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type CreatePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) Reset() {
|
||||
*x = CreatePreAuthKeyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreatePreAuthKeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreatePreAuthKeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CreatePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {
|
||||
if x != nil {
|
||||
return x.PreAuthKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) Reset() {
|
||||
*x = ExpirePreAuthKeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExpirePreAuthKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExpirePreAuthKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyRequest) GetKey() string {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ExpirePreAuthKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) Reset() {
|
||||
*x = ExpirePreAuthKeyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExpirePreAuthKeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ExpirePreAuthKeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExpirePreAuthKeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
type ListPreAuthKeysRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) Reset() {
|
||||
*x = ListPreAuthKeysRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListPreAuthKeysRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListPreAuthKeysRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysRequest) GetNamespace() string {
|
||||
if x != nil {
|
||||
return x.Namespace
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ListPreAuthKeysResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) Reset() {
|
||||
*x = ListPreAuthKeysResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListPreAuthKeysResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_preauthkey_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListPreAuthKeysResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ListPreAuthKeysResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
|
||||
if x != nil {
|
||||
return x.PreAuthKeys
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
|
||||
0x0a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
||||
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x91,
|
||||
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a,
|
||||
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
|
||||
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b,
|
||||
0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x1a, 0x0a,
|
||||
0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
|
||||
0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68,
|
||||
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70,
|
||||
0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x64, 0x18,
|
||||
0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x75, 0x73, 0x65, 0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x65,
|
||||
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70,
|
||||
0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
|
||||
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
|
||||
0x41, 0x74, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c,
|
||||
0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08,
|
||||
0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65,
|
||||
0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68,
|
||||
0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
|
||||
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41,
|
||||
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a,
|
||||
0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0a,
|
||||
0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x49, 0x0a, 0x17, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
|
||||
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
|
||||
0x61, 0x63, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50,
|
||||
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x36, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75,
|
||||
0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
|
||||
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_preauthkey_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = file_headscale_v1_preauthkey_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_preauthkey_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_preauthkey_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_preauthkey_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_headscale_v1_preauthkey_proto_goTypes = []interface{}{
|
||||
(*PreAuthKey)(nil), // 0: headscale.v1.PreAuthKey
|
||||
(*CreatePreAuthKeyRequest)(nil), // 1: headscale.v1.CreatePreAuthKeyRequest
|
||||
(*CreatePreAuthKeyResponse)(nil), // 2: headscale.v1.CreatePreAuthKeyResponse
|
||||
(*ExpirePreAuthKeyRequest)(nil), // 3: headscale.v1.ExpirePreAuthKeyRequest
|
||||
(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
|
||||
(*ListPreAuthKeysRequest)(nil), // 5: headscale.v1.ListPreAuthKeysRequest
|
||||
(*ListPreAuthKeysResponse)(nil), // 6: headscale.v1.ListPreAuthKeysResponse
|
||||
(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
|
||||
7, // 0: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
7, // 1: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
7, // 2: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
0, // 3: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
|
||||
0, // 4: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
|
||||
5, // [5:5] is the sub-list for method output_type
|
||||
5, // [5:5] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_preauthkey_proto_init() }
|
||||
func file_headscale_v1_preauthkey_proto_init() {
|
||||
if File_headscale_v1_preauthkey_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*PreAuthKey); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreatePreAuthKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreatePreAuthKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExpirePreAuthKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExpirePreAuthKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListPreAuthKeysRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_preauthkey_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListPreAuthKeysResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_preauthkey_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 7,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_preauthkey_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_preauthkey_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_preauthkey_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_preauthkey_proto = out.File
|
||||
file_headscale_v1_preauthkey_proto_rawDesc = nil
|
||||
file_headscale_v1_preauthkey_proto_goTypes = nil
|
||||
file_headscale_v1_preauthkey_proto_depIdxs = nil
|
||||
}
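CreatePreAuthKeyRequest pairs plain boolean flags with a google.protobuf.Timestamp expiration. A small sketch of building such a request follows; the namespace name and the 24-hour expiry are made-up values for illustration only.

package main

import (
	"fmt"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Hypothetical values: a reusable, non-ephemeral key for namespace "default"
	// that expires 24 hours from now.
	req := &v1.CreatePreAuthKeyRequest{
		Namespace:  "default",
		Reusable:   true,
		Ephemeral:  false,
		Expiration: timestamppb.New(time.Now().Add(24 * time.Hour)),
	}

	// prototext is just a convenient way to inspect the message here;
	// in practice the request is sent via the generated gRPC client.
	fmt.Println(prototext.Format(req))
}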
gen/go/headscale/v1/routes.pb.go (new file, 424 lines)
@@ -0,0 +1,424 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.27.1
|
||||
// protoc v3.17.3
|
||||
// source: headscale/v1/routes.proto
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Routes struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
AdvertisedRoutes []string `protobuf:"bytes,1,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
|
||||
EnabledRoutes []string `protobuf:"bytes,2,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Routes) Reset() {
|
||||
*x = Routes{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Routes) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Routes) ProtoMessage() {}
|
||||
|
||||
func (x *Routes) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Routes.ProtoReflect.Descriptor instead.
|
||||
func (*Routes) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Routes) GetAdvertisedRoutes() []string {
|
||||
if x != nil {
|
||||
return x.AdvertisedRoutes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Routes) GetEnabledRoutes() []string {
|
||||
if x != nil {
|
||||
return x.EnabledRoutes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetMachineRouteRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) Reset() {
|
||||
*x = GetMachineRouteRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRouteRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRouteRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRouteRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRouteRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetMachineRouteResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes *Routes `protobuf:"bytes,1,opt,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) Reset() {
|
||||
*x = GetMachineRouteResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetMachineRouteResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetMachineRouteResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetMachineRouteResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetMachineRouteResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *GetMachineRouteResponse) GetRoutes() *Routes {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableMachineRoutesRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
MachineId uint64 `protobuf:"varint,1,opt,name=machine_id,json=machineId,proto3" json:"machine_id,omitempty"`
|
||||
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) Reset() {
|
||||
*x = EnableMachineRoutesRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableMachineRoutesRequest) ProtoMessage() {}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableMachineRoutesRequest.ProtoReflect.Descriptor instead.
|
||||
func (*EnableMachineRoutesRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) GetMachineId() uint64 {
|
||||
if x != nil {
|
||||
return x.MachineId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesRequest) GetRoutes() []string {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EnableMachineRoutesResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Routes *Routes `protobuf:"bytes,1,opt,name=routes,proto3" json:"routes,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) Reset() {
|
||||
*x = EnableMachineRoutesResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnableMachineRoutesResponse) ProtoMessage() {}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_routes_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnableMachineRoutesResponse.ProtoReflect.Descriptor instead.
|
||||
func (*EnableMachineRoutesResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *EnableMachineRoutesResponse) GetRoutes() *Routes {
|
||||
if x != nil {
|
||||
return x.Routes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_headscale_v1_routes_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_routes_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x5c, 0x0a, 0x06, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65,
|
||||
0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
|
||||
0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
|
||||
0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x64,
|
||||
0x22, 0x47, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x72,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x53, 0x0a, 0x1a, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x4b,
|
||||
0x0a, 0x1b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52,
|
||||
0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a,
|
||||
0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f,
|
||||
0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e,
|
||||
0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_headscale_v1_routes_proto_rawDescOnce sync.Once
|
||||
file_headscale_v1_routes_proto_rawDescData = file_headscale_v1_routes_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_headscale_v1_routes_proto_rawDescGZIP() []byte {
|
||||
file_headscale_v1_routes_proto_rawDescOnce.Do(func() {
|
||||
file_headscale_v1_routes_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_routes_proto_rawDescData)
|
||||
})
|
||||
return file_headscale_v1_routes_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_headscale_v1_routes_proto_goTypes = []interface{}{
|
||||
(*Routes)(nil), // 0: headscale.v1.Routes
|
||||
(*GetMachineRouteRequest)(nil), // 1: headscale.v1.GetMachineRouteRequest
|
||||
(*GetMachineRouteResponse)(nil), // 2: headscale.v1.GetMachineRouteResponse
|
||||
(*EnableMachineRoutesRequest)(nil), // 3: headscale.v1.EnableMachineRoutesRequest
|
||||
(*EnableMachineRoutesResponse)(nil), // 4: headscale.v1.EnableMachineRoutesResponse
|
||||
}
|
||||
var file_headscale_v1_routes_proto_depIdxs = []int32{
|
||||
0, // 0: headscale.v1.GetMachineRouteResponse.routes:type_name -> headscale.v1.Routes
|
||||
0, // 1: headscale.v1.EnableMachineRoutesResponse.routes:type_name -> headscale.v1.Routes
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_headscale_v1_routes_proto_init() }
|
||||
func file_headscale_v1_routes_proto_init() {
|
||||
if File_headscale_v1_routes_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_headscale_v1_routes_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Routes); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRouteRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetMachineRouteResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnableMachineRoutesRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_routes_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnableMachineRoutesResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_routes_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_headscale_v1_routes_proto_goTypes,
|
||||
DependencyIndexes: file_headscale_v1_routes_proto_depIdxs,
|
||||
MessageInfos: file_headscale_v1_routes_proto_msgTypes,
|
||||
}.Build()
|
||||
File_headscale_v1_routes_proto = out.File
|
||||
file_headscale_v1_routes_proto_rawDesc = nil
|
||||
file_headscale_v1_routes_proto_goTypes = nil
|
||||
file_headscale_v1_routes_proto_depIdxs = nil
|
||||
}
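For context on the generated code above, the Routes message is a plain protobuf-go struct; the following minimal sketch constructs one and marshals it to the wire format. The route strings are placeholders, and the program only demonstrates the generated fields and getters shown in this file.

```go
package main

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// AdvertisedRoutes and EnabledRoutes are the two repeated string fields
	// defined in the Routes message above.
	routes := &v1.Routes{
		AdvertisedRoutes: []string{"10.0.0.0/24", "0.0.0.0/0"},
		EnabledRoutes:    []string{"10.0.0.0/24"},
	}

	// proto.Marshal uses the descriptor registered in
	// file_headscale_v1_routes_proto_init to produce the wire encoding.
	wire, err := proto.Marshal(routes)
	if err != nil {
		panic(err)
	}

	fmt.Printf("encoded %d bytes, enabled: %v\n", len(wire), routes.GetEnabledRoutes())
}
```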
|
||||
gen/openapiv2/headscale/v1/device.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/device.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
gen/openapiv2/headscale/v1/headscale.swagger.json (new file, 923 lines)
@@ -0,0 +1,923 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/headscale.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"tags": [
|
||||
{
|
||||
"name": "HeadscaleService"
|
||||
}
|
||||
],
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {
|
||||
"/api/v1/debug/machine": {
|
||||
"post": {
|
||||
"summary": "--- Machine start ---",
|
||||
"operationId": "HeadscaleService_DebugCreateMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DebugCreateMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DebugCreateMachineRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_ListMachines",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ListMachinesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "namespace",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/register": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_RegisterMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1RegisterMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_GetMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"delete": {
|
||||
"operationId": "HeadscaleService_DeleteMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DeleteMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/expire": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_ExpireMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ExpireMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/routes": {
|
||||
"get": {
|
||||
"summary": "--- Route start ---",
|
||||
"operationId": "HeadscaleService_GetMachineRoute",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetMachineRouteResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_EnableMachineRoutes",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1EnableMachineRoutesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/share/{namespace}": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_ShareMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ShareMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "namespace",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/machine/{machineId}/unshare/{namespace}": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_UnshareMachine",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1UnshareMachineResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "machineId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
{
|
||||
"name": "namespace",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/namespace": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_ListNamespaces",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ListNamespacesResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_CreateNamespace",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1CreateNamespaceResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1CreateNamespaceRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/namespace/{name}": {
|
||||
"get": {
|
||||
"summary": "--- Namespace start ---",
|
||||
"operationId": "HeadscaleService_GetNamespace",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1GetNamespaceResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"delete": {
|
||||
"operationId": "HeadscaleService_DeleteNamespace",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1DeleteNamespaceResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/namespace/{oldName}/rename/{newName}": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_RenameNamespace",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1RenameNamespaceResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "oldName",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "newName",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/preauthkey": {
|
||||
"get": {
|
||||
"operationId": "HeadscaleService_ListPreAuthKeys",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ListPreAuthKeysResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "namespace",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"type": "string"
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
},
|
||||
"post": {
|
||||
"summary": "--- PreAuthKeys start ---",
|
||||
"operationId": "HeadscaleService_CreatePreAuthKey",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1CreatePreAuthKeyResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1CreatePreAuthKeyRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
},
|
||||
"/api/v1/preauthkey/expire": {
|
||||
"post": {
|
||||
"operationId": "HeadscaleService_ExpirePreAuthKey",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "A successful response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ExpirePreAuthKeyResponse"
|
||||
}
|
||||
},
|
||||
"default": {
|
||||
"description": "An unexpected error response.",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/rpcStatus"
|
||||
}
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1ExpirePreAuthKeyRequest"
|
||||
}
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"HeadscaleService"
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1CreateNamespaceRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1CreateNamespaceResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"$ref": "#/definitions/v1Namespace"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1CreatePreAuthKeyRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
},
|
||||
"reusable": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"expiration": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1CreatePreAuthKeyResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"preAuthKey": {
|
||||
"$ref": "#/definitions/v1PreAuthKey"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1DebugCreateMachineRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"routes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1DebugCreateMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1DeleteMachineResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1DeleteNamespaceResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1EnableMachineRoutesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"$ref": "#/definitions/v1Routes"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ExpireMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ExpirePreAuthKeyRequest": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ExpirePreAuthKeyResponse": {
|
||||
"type": "object"
|
||||
},
|
||||
"v1GetMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetMachineRouteResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"routes": {
|
||||
"$ref": "#/definitions/v1Routes"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1GetNamespaceResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"$ref": "#/definitions/v1Namespace"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ListMachinesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machines": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ListNamespacesResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespaces": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1Namespace"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ListPreAuthKeysResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"preAuthKeys": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/v1PreAuthKey"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1Machine": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"format": "uint64"
|
||||
},
|
||||
"machineKey": {
|
||||
"type": "string"
|
||||
},
|
||||
"nodeKey": {
|
||||
"type": "string"
|
||||
},
|
||||
"discoKey": {
|
||||
"type": "string"
|
||||
},
|
||||
"ipAddresses": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"namespace": {
|
||||
"$ref": "#/definitions/v1Namespace"
|
||||
},
|
||||
"registered": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"registerMethod": {
|
||||
"$ref": "#/definitions/v1RegisterMethod"
|
||||
},
|
||||
"lastSeen": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"lastSuccessfulUpdate": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"expiry": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"preAuthKey": {
|
||||
"$ref": "#/definitions/v1PreAuthKey"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1Namespace": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1PreAuthKey": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"key": {
|
||||
"type": "string"
|
||||
},
|
||||
"reusable": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"ephemeral": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"used": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"expiration": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
},
|
||||
"createdAt": {
|
||||
"type": "string",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1RegisterMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1RegisterMethod": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"REGISTER_METHOD_UNSPECIFIED",
|
||||
"REGISTER_METHOD_AUTH_KEY",
|
||||
"REGISTER_METHOD_CLI",
|
||||
"REGISTER_METHOD_OIDC"
|
||||
],
|
||||
"default": "REGISTER_METHOD_UNSPECIFIED"
|
||||
},
|
||||
"v1RenameNamespaceResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"$ref": "#/definitions/v1Namespace"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1Routes": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"advertisedRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"enabledRoutes": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1ShareMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1UnshareMachineResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"machine": {
|
||||
"$ref": "#/definitions/v1Machine"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
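The swagger document above describes the REST surface exposed through grpc-gateway. As a minimal sketch, calling POST /api/v1/preauthkey from Go could look like the following; the localhost:8080 address and the absence of any authentication header are assumptions of this sketch, not something the spec above states.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Field names follow the v1CreatePreAuthKeyRequest definition above.
	body := []byte(`{"namespace": "default", "reusable": true, "ephemeral": false}`)

	resp, err := http.Post(
		"http://localhost:8080/api/v1/preauthkey", // assumed listen address
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the body is a v1CreatePreAuthKeyResponse containing a
	// preAuthKey object, as described in the definitions section.
	payload, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(payload))
}
```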
|
||||
gen/openapiv2/headscale/v1/machine.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/machine.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
gen/openapiv2/headscale/v1/namespace.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/namespace.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
gen/openapiv2/headscale/v1/preauthkey.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/preauthkey.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
gen/openapiv2/headscale/v1/routes.swagger.json (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"title": "headscale/v1/routes.proto",
|
||||
"version": "version not set"
|
||||
},
|
||||
"consumes": [
|
||||
"application/json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"paths": {},
|
||||
"definitions": {
|
||||
"protobufAny": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"@type": {
|
||||
"type": "string"
|
||||
}
|
||||
},
|
||||
"additionalProperties": {}
|
||||
},
|
||||
"rpcStatus": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "integer",
|
||||
"format": "int32"
|
||||
},
|
||||
"message": {
|
||||
"type": "string"
|
||||
},
|
||||
"details": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/protobufAny"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
go.mod (150 lines changed)
@@ -1,26 +1,138 @@
|
||||
module github.com/juanfont/headscale
|
||||
|
||||
go 1.16
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.0.5
|
||||
github.com/efekarakus/termcolor v1.0.1 // indirect
|
||||
github.com/gin-gonic/gin v1.7.2
|
||||
github.com/hako/durafmt v0.0.0-20210608085754-5c1018a4e16b
|
||||
github.com/klauspost/compress v1.13.1
|
||||
github.com/lib/pq v1.10.2 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.7 // indirect
|
||||
github.com/rs/zerolog v1.23.0 // indirect
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/viper v1.8.1
|
||||
github.com/tailscale/hujson v0.0.0-20200924210142-dde312d0d6a2
|
||||
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e
|
||||
github.com/AlecAivazis/survey/v2 v2.3.2
|
||||
github.com/coreos/go-oidc/v3 v3.1.0
|
||||
github.com/efekarakus/termcolor v1.0.1
|
||||
github.com/fatih/set v0.2.1
|
||||
github.com/gin-gonic/gin v1.7.4
|
||||
github.com/gofrs/uuid v4.1.0+incompatible
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.6.0
|
||||
github.com/infobloxopen/protoc-gen-gorm v1.0.1
|
||||
github.com/klauspost/compress v1.13.6
|
||||
github.com/ory/dockertest/v3 v3.7.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/philip-bui/grpc-zerolog v1.0.1
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/pterm/pterm v0.12.30
|
||||
github.com/rs/zerolog v1.26.0
|
||||
github.com/soheilhy/cmux v0.1.5
|
||||
github.com/spf13/cobra v1.2.1
|
||||
github.com/spf13/viper v1.9.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/tailscale/hujson v0.0.0-20211105212140-3a0adc019d83
|
||||
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
|
||||
github.com/zsais/go-gin-prometheus v0.1.0
|
||||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
google.golang.org/genproto v0.0.0-20211104193956-4c6863e31247
|
||||
google.golang.org/grpc v1.42.0
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0
|
||||
google.golang.org/protobuf v1.27.1
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gorm.io/datatypes v1.0.1
|
||||
gorm.io/driver/postgres v1.1.0
|
||||
gorm.io/driver/sqlite v1.1.4
|
||||
gorm.io/gorm v1.21.11
|
||||
inet.af/netaddr v0.0.0-20210603230628-bf05d8b52dda
|
||||
tailscale.com v1.10.0
|
||||
gorm.io/datatypes v1.0.2
|
||||
gorm.io/driver/postgres v1.1.1
|
||||
gorm.io/driver/sqlite v1.1.5
|
||||
gorm.io/gorm v1.21.15
|
||||
inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6
|
||||
tailscale.com v1.20.3
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.1 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/atomicgo/cursor v0.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/containerd/continuity v0.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v20.10.8+incompatible // indirect
|
||||
github.com/docker/docker v20.10.8+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-playground/locales v0.14.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.9.0 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/gookit/color v1.4.2 // indirect
|
||||
github.com/hashicorp/go-version v1.2.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.10.0 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.1.1 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||
github.com/jackc/pgtype v1.8.1 // indirect
|
||||
github.com/jackc/pgx/v4 v4.13.0 // indirect
|
||||
github.com/jinzhu/gorm v1.9.16 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.2 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/kr/pretty v0.3.0 // indirect
|
||||
github.com/kr/text v0.2.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/lib/pq v1.10.3 // indirect
|
||||
github.com/magiconair/properties v1.8.5 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.8 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/runc v1.0.3 // indirect
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1-0.20211023094830-115ce09fd6b4 // indirect
|
||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
||||
github.com/spf13/afero v1.6.0 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/subosito/gotenv v1.2.0 // indirect
|
||||
github.com/ugorji/go/codec v1.2.6 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 // indirect
|
||||
go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect
|
||||
go4.org/mem v0.0.0-20210711025021-927187094b94 // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37 // indirect
|
||||
golang.org/x/net v0.0.0-20211205041911-012df41ee64c // indirect
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/ini.v1 v1.66.2 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
)
|
||||
|
||||
grpcv1.go (new file, 407 lines)
@@ -0,0 +1,407 @@
|
||||
//nolint
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/datatypes"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
type headscaleV1APIServer struct { // v1.HeadscaleServiceServer
|
||||
v1.UnimplementedHeadscaleServiceServer
|
||||
h *Headscale
|
||||
}
|
||||
|
||||
func newHeadscaleV1APIServer(h *Headscale) v1.HeadscaleServiceServer {
|
||||
return headscaleV1APIServer{
|
||||
h: h,
|
||||
}
|
||||
}
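For context, here is a sketch of how newHeadscaleV1APIServer would typically be wired into a gRPC server. RegisterHeadscaleServiceServer is the registration helper that protoc-gen-go-grpc generates for HeadscaleService; the helper function name serveGRPCSketch, the listen address, and the way the *Headscale value is obtained are assumptions for illustration, not part of this diff.

```go
package headscale

import (
	"net"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
)

// serveGRPCSketch is a hypothetical helper showing the registration flow;
// the real wiring in headscale lives elsewhere in the codebase.
func serveGRPCSketch(h *Headscale) error {
	listener, err := net.Listen("tcp", "127.0.0.1:50443") // assumed address
	if err != nil {
		return err
	}

	grpcServer := grpc.NewServer()
	v1.RegisterHeadscaleServiceServer(grpcServer, newHeadscaleV1APIServer(h))

	return grpcServer.Serve(listener)
}
```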
|
||||
|
||||
func (api headscaleV1APIServer) GetNamespace(
|
||||
ctx context.Context,
|
||||
request *v1.GetNamespaceRequest,
|
||||
) (*v1.GetNamespaceResponse, error) {
|
||||
namespace, err := api.h.GetNamespace(request.GetName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.GetNamespaceResponse{Namespace: namespace.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) CreateNamespace(
|
||||
ctx context.Context,
|
||||
request *v1.CreateNamespaceRequest,
|
||||
) (*v1.CreateNamespaceResponse, error) {
|
||||
namespace, err := api.h.CreateNamespace(request.GetName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.CreateNamespaceResponse{Namespace: namespace.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) RenameNamespace(
|
||||
ctx context.Context,
|
||||
request *v1.RenameNamespaceRequest,
|
||||
) (*v1.RenameNamespaceResponse, error) {
|
||||
err := api.h.RenameNamespace(request.GetOldName(), request.GetNewName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
namespace, err := api.h.GetNamespace(request.GetNewName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.RenameNamespaceResponse{Namespace: namespace.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) DeleteNamespace(
|
||||
ctx context.Context,
|
||||
request *v1.DeleteNamespaceRequest,
|
||||
) (*v1.DeleteNamespaceResponse, error) {
|
||||
err := api.h.DestroyNamespace(request.GetName())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.DeleteNamespaceResponse{}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) ListNamespaces(
|
||||
ctx context.Context,
|
||||
request *v1.ListNamespacesRequest,
|
||||
) (*v1.ListNamespacesResponse, error) {
|
||||
namespaces, err := api.h.ListNamespaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
response := make([]*v1.Namespace, len(namespaces))
|
||||
for index, namespace := range namespaces {
|
||||
response[index] = namespace.toProto()
|
||||
}
|
||||
|
||||
log.Trace().Caller().Interface("namespaces", response).Msg("")
|
||||
|
||||
return &v1.ListNamespacesResponse{Namespaces: response}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) CreatePreAuthKey(
|
||||
ctx context.Context,
|
||||
request *v1.CreatePreAuthKeyRequest,
|
||||
) (*v1.CreatePreAuthKeyResponse, error) {
|
||||
var expiration time.Time
|
||||
if request.GetExpiration() != nil {
|
||||
expiration = request.GetExpiration().AsTime()
|
||||
}
|
||||
|
||||
preAuthKey, err := api.h.CreatePreAuthKey(
|
||||
request.GetNamespace(),
|
||||
request.GetReusable(),
|
||||
request.GetEphemeral(),
|
||||
&expiration,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &v1.CreatePreAuthKeyResponse{PreAuthKey: preAuthKey.toProto()}, nil
|
||||
}
|
||||
|
||||
func (api headscaleV1APIServer) ExpirePreAuthKey(
|
||||
ctx context.Context,
|
||||
	request *v1.ExpirePreAuthKeyRequest,
) (*v1.ExpirePreAuthKeyResponse, error) {
	preAuthKey, err := api.h.GetPreAuthKey(request.GetNamespace(), request.Key)
	if err != nil {
		return nil, err
	}

	err = api.h.ExpirePreAuthKey(preAuthKey)
	if err != nil {
		return nil, err
	}

	return &v1.ExpirePreAuthKeyResponse{}, nil
}

func (api headscaleV1APIServer) ListPreAuthKeys(
	ctx context.Context,
	request *v1.ListPreAuthKeysRequest,
) (*v1.ListPreAuthKeysResponse, error) {
	preAuthKeys, err := api.h.ListPreAuthKeys(request.GetNamespace())
	if err != nil {
		return nil, err
	}

	response := make([]*v1.PreAuthKey, len(preAuthKeys))
	for index, key := range preAuthKeys {
		response[index] = key.toProto()
	}

	return &v1.ListPreAuthKeysResponse{PreAuthKeys: response}, nil
}

func (api headscaleV1APIServer) RegisterMachine(
	ctx context.Context,
	request *v1.RegisterMachineRequest,
) (*v1.RegisterMachineResponse, error) {
	log.Trace().
		Str("namespace", request.GetNamespace()).
		Str("machine_key", request.GetKey()).
		Msg("Registering machine")
	machine, err := api.h.RegisterMachine(
		request.GetKey(),
		request.GetNamespace(),
	)
	if err != nil {
		return nil, err
	}

	return &v1.RegisterMachineResponse{Machine: machine.toProto()}, nil
}

func (api headscaleV1APIServer) GetMachine(
	ctx context.Context,
	request *v1.GetMachineRequest,
) (*v1.GetMachineResponse, error) {
	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	return &v1.GetMachineResponse{Machine: machine.toProto()}, nil
}

func (api headscaleV1APIServer) DeleteMachine(
	ctx context.Context,
	request *v1.DeleteMachineRequest,
) (*v1.DeleteMachineResponse, error) {
	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	err = api.h.DeleteMachine(
		machine,
	)
	if err != nil {
		return nil, err
	}

	return &v1.DeleteMachineResponse{}, nil
}

func (api headscaleV1APIServer) ExpireMachine(
	ctx context.Context,
	request *v1.ExpireMachineRequest,
) (*v1.ExpireMachineResponse, error) {
	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	api.h.ExpireMachine(
		machine,
	)

	log.Trace().
		Str("machine", machine.Name).
		Time("expiry", *machine.Expiry).
		Msg("machine expired")

	return &v1.ExpireMachineResponse{Machine: machine.toProto()}, nil
}

func (api headscaleV1APIServer) ListMachines(
	ctx context.Context,
	request *v1.ListMachinesRequest,
) (*v1.ListMachinesResponse, error) {
	if request.GetNamespace() != "" {
		machines, err := api.h.ListMachinesInNamespace(request.GetNamespace())
		if err != nil {
			return nil, err
		}

		sharedMachines, err := api.h.ListSharedMachinesInNamespace(
			request.GetNamespace(),
		)
		if err != nil {
			return nil, err
		}

		machines = append(machines, sharedMachines...)

		response := make([]*v1.Machine, len(machines))
		for index, machine := range machines {
			response[index] = machine.toProto()
		}

		return &v1.ListMachinesResponse{Machines: response}, nil
	}

	machines, err := api.h.ListMachines()
	if err != nil {
		return nil, err
	}

	response := make([]*v1.Machine, len(machines))
	for index, machine := range machines {
		response[index] = machine.toProto()
	}

	return &v1.ListMachinesResponse{Machines: response}, nil
}

func (api headscaleV1APIServer) ShareMachine(
	ctx context.Context,
	request *v1.ShareMachineRequest,
) (*v1.ShareMachineResponse, error) {
	destinationNamespace, err := api.h.GetNamespace(request.GetNamespace())
	if err != nil {
		return nil, err
	}

	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	err = api.h.AddSharedMachineToNamespace(machine, destinationNamespace)
	if err != nil {
		return nil, err
	}

	return &v1.ShareMachineResponse{Machine: machine.toProto()}, nil
}

func (api headscaleV1APIServer) UnshareMachine(
	ctx context.Context,
	request *v1.UnshareMachineRequest,
) (*v1.UnshareMachineResponse, error) {
	destinationNamespace, err := api.h.GetNamespace(request.GetNamespace())
	if err != nil {
		return nil, err
	}

	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	err = api.h.RemoveSharedMachineFromNamespace(machine, destinationNamespace)
	if err != nil {
		return nil, err
	}

	return &v1.UnshareMachineResponse{Machine: machine.toProto()}, nil
}

func (api headscaleV1APIServer) GetMachineRoute(
	ctx context.Context,
	request *v1.GetMachineRouteRequest,
) (*v1.GetMachineRouteResponse, error) {
	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	routes, err := machine.RoutesToProto()
	if err != nil {
		return nil, err
	}

	return &v1.GetMachineRouteResponse{
		Routes: routes,
	}, nil
}

func (api headscaleV1APIServer) EnableMachineRoutes(
	ctx context.Context,
	request *v1.EnableMachineRoutesRequest,
) (*v1.EnableMachineRoutesResponse, error) {
	machine, err := api.h.GetMachineByID(request.GetMachineId())
	if err != nil {
		return nil, err
	}

	err = api.h.EnableRoutes(machine, request.GetRoutes()...)
	if err != nil {
		return nil, err
	}

	routes, err := machine.RoutesToProto()
	if err != nil {
		return nil, err
	}

	return &v1.EnableMachineRoutesResponse{
		Routes: routes,
	}, nil
}

// The following service calls are for testing and debugging
func (api headscaleV1APIServer) DebugCreateMachine(
	ctx context.Context,
	request *v1.DebugCreateMachineRequest,
) (*v1.DebugCreateMachineResponse, error) {
	namespace, err := api.h.GetNamespace(request.GetNamespace())
	if err != nil {
		return nil, err
	}

	routes, err := stringToIPPrefix(request.GetRoutes())
	if err != nil {
		return nil, err
	}

	log.Trace().
		Caller().
		Interface("route-prefix", routes).
		Interface("route-str", request.GetRoutes()).
		Msg("")

	hostinfo := tailcfg.Hostinfo{
		RoutableIPs: routes,
		OS:          "TestOS",
		Hostname:    "DebugTestMachine",
	}

	log.Trace().Caller().Interface("hostinfo", hostinfo).Msg("")

	hostinfoJson, err := json.Marshal(hostinfo)
	if err != nil {
		return nil, err
	}

	newMachine := Machine{
		MachineKey: request.GetKey(),
		Name:       request.GetName(),
		Namespace:  *namespace,

		Expiry:               &time.Time{},
		LastSeen:             &time.Time{},
		LastSuccessfulUpdate: &time.Time{},

		HostInfo: datatypes.JSON(hostinfoJson),
	}

	// log.Trace().Caller().Interface("machine", newMachine).Msg("")

	if err := api.h.db.Create(&newMachine).Error; err != nil {
		return nil, err
	}

	return &v1.DebugCreateMachineResponse{Machine: newMachine.toProto()}, nil
}

func (api headscaleV1APIServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
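For illustration only (not part of this diff): a minimal sketch of how a caller could exercise the ListMachines handler above over gRPC, using the generated v1 types. The client constructor name, the dial address, and the insecure credentials option are assumptions; only the request and response fields mirror the handler code.

package main

import (
	"context"
	"fmt"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"google.golang.org/grpc"
)

func main() {
	// Assumed address of a headscale gRPC listener; adjust to your deployment.
	conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("could not connect: %s", err)
	}
	defer conn.Close()

	// Assumed name of the protoc-generated client constructor.
	client := v1.NewHeadscaleServiceClient(conn)

	// A non-empty Namespace takes the first branch of ListMachines above
	// (machines in the namespace plus machines shared into it).
	resp, err := client.ListMachines(
		context.Background(),
		&v1.ListMachinesRequest{Namespace: "main"},
	)
	if err != nil {
		log.Fatalf("ListMachines failed: %s", err)
	}

	for _, machine := range resp.Machines {
		fmt.Println(machine.Id)
	}
}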
integration_cli_test.go (new file, 1195 lines): diff suppressed because it is too large.

integration_common_test.go (new file, 120 lines):
@@ -0,0 +1,120 @@
//go:build integration
// +build integration

package headscale

import (
	"bytes"
	"fmt"
	"time"

	"inet.af/netaddr"

	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
)

const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second

var IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
var IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")

type ExecuteCommandConfig struct {
	timeout time.Duration
}

type ExecuteCommandOption func(*ExecuteCommandConfig) error

func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
	return ExecuteCommandOption(func(conf *ExecuteCommandConfig) error {
		conf.timeout = timeout
		return nil
	})
}

func ExecuteCommand(
	resource *dockertest.Resource,
	cmd []string,
	env []string,
	options ...ExecuteCommandOption,
) (string, error) {
	var stdout bytes.Buffer
	var stderr bytes.Buffer

	execConfig := ExecuteCommandConfig{
		timeout: DOCKER_EXECUTE_TIMEOUT,
	}

	for _, opt := range options {
		if err := opt(&execConfig); err != nil {
			return "", fmt.Errorf("execute-command/options: %w", err)
		}
	}

	type result struct {
		exitCode int
		err      error
	}

	resultChan := make(chan result, 1)

	// Run the long-running command in its own goroutine and pass its
	// result back into our channel.
	go func() {
		exitCode, err := resource.Exec(
			cmd,
			dockertest.ExecOptions{
				Env:    append(env, "HEADSCALE_LOG_LEVEL=disabled"),
				StdOut: &stdout,
				StdErr: &stderr,
			},
		)
		resultChan <- result{exitCode, err}
	}()

	// Listen on our channel AND a timeout channel - whichever happens first.
	select {
	case res := <-resultChan:
		if res.err != nil {
			return "", res.err
		}

		if res.exitCode != 0 {
			fmt.Println("Command: ", cmd)
			fmt.Println("stdout: ", stdout.String())
			fmt.Println("stderr: ", stderr.String())

			return "", fmt.Errorf("command failed with: %s", stderr.String())
		}

		return stdout.String(), nil
	case <-time.After(execConfig.timeout):

		return "", fmt.Errorf("command timed out after %s", execConfig.timeout)
	}
}

func DockerRestartPolicy(config *docker.HostConfig) {
	// Setting AutoRemove to true would make a stopped container go away by itself on error *immediately*;
	// when set to false, containers remain until the end of the integration test.
	config.AutoRemove = false
	config.RestartPolicy = docker.RestartPolicy{
		Name: "no",
	}
}

func DockerAllowLocalIPv6(config *docker.HostConfig) {
	if config.Sysctls == nil {
		config.Sysctls = make(map[string]string, 1)
	}
	config.Sysctls["net.ipv6.conf.all.disable_ipv6"] = "0"
}

func DockerAllowNetworkAdministration(config *docker.HostConfig) {
	config.CapAdd = append(config.CapAdd, "NET_ADMIN")
	config.Mounts = append(config.Mounts, docker.HostMount{
		Type:   "bind",
		Source: "/dev/net/tun",
		Target: "/dev/net/tun",
	})
}
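For illustration only (not part of this diff): a sketch of how the helpers above are intended to be used from a test in this package. The resource argument is assumed to be a running container handle such as the suite's headscale resource; the command itself is just an example.

func exampleListNamespaces(resource *dockertest.Resource) (string, error) {
	// Without options, ExecuteCommand falls back to DOCKER_EXECUTE_TIMEOUT (10s);
	// the functional option below extends the deadline for a slower command.
	return ExecuteCommand(
		resource,
		[]string{"headscale", "namespaces", "list"},
		[]string{},
		ExecuteCommandTimeout(30*time.Second),
	)
}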
integration_test.go (new file, 845 lines):
@@ -0,0 +1,845 @@
//go:build integration
// +build integration

package headscale

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"strings"
	"testing"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/suite"
	"inet.af/netaddr"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/ipn/ipnstate"
)

var tailscaleVersions = []string{"1.20.2", "1.18.2", "1.16.2", "1.14.3", "1.12.3"}

type TestNamespace struct {
	count      int
	tailscales map[string]dockertest.Resource
}

type IntegrationTestSuite struct {
	suite.Suite
	stats *suite.SuiteInformation

	pool      dockertest.Pool
	network   dockertest.Network
	headscale dockertest.Resource

	namespaces map[string]TestNamespace
}

func TestIntegrationTestSuite(t *testing.T) {
	s := new(IntegrationTestSuite)

	s.namespaces = map[string]TestNamespace{
		"main": {
			count:      20,
			tailscales: make(map[string]dockertest.Resource),
		},
		"shared": {
			count:      5,
			tailscales: make(map[string]dockertest.Resource),
		},
	}

	suite.Run(t, s)

	// HandleStats, which allows us to check if we passed and save logs
	// is called after TearDown, so we cannot tear down containers before
	// we have potentially saved the logs.
	for _, scales := range s.namespaces {
		for _, tailscale := range scales.tailscales {
			if err := s.pool.Purge(&tailscale); err != nil {
				log.Printf("Could not purge resource: %s\n", err)
			}
		}
	}

	if !s.stats.Passed() {
		err := s.saveLog(&s.headscale, "test_output")
		if err != nil {
			log.Printf("Could not save log: %s\n", err)
		}
	}
	if err := s.pool.Purge(&s.headscale); err != nil {
		log.Printf("Could not purge resource: %s\n", err)
	}

	if err := s.network.Close(); err != nil {
		log.Printf("Could not close network: %s\n", err)
	}
}

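For illustration only (not part of this diff): the cleanup order above relies on testify calling HandleStats only after the whole suite, including TearDownSuite, has run. A minimal sketch of that hook, assuming the testify version in use exposes the suite.WithStats interface:

package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type StatsAwareSuite struct {
	suite.Suite
	stats *suite.SuiteInformation
}

// HandleStats satisfies suite.WithStats; testify invokes it after the suite
// has finished, so any cleanup that depends on pass/fail state must run
// after suite.Run returns, exactly as in TestIntegrationTestSuite above.
func (s *StatsAwareSuite) HandleStats(suiteName string, stats *suite.SuiteInformation) {
	s.stats = stats
}

func TestStatsAwareSuite(t *testing.T) {
	s := new(StatsAwareSuite)
	suite.Run(t, s)

	if s.stats != nil && !s.stats.Passed() {
		t.Log("suite failed; container logs could be collected here")
	}
}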
func (s *IntegrationTestSuite) saveLog(
	resource *dockertest.Resource,
	basePath string,
) error {
	err := os.MkdirAll(basePath, os.ModePerm)
	if err != nil {
		return err
	}

	var stdout bytes.Buffer
	var stderr bytes.Buffer

	err = s.pool.Client.Logs(
		docker.LogsOptions{
			Context:      context.TODO(),
			Container:    resource.Container.ID,
			OutputStream: &stdout,
			ErrorStream:  &stderr,
			Tail:         "all",
			RawTerminal:  false,
			Stdout:       true,
			Stderr:       true,
			Follow:       false,
			Timestamps:   false,
		},
	)
	if err != nil {
		return err
	}

	fmt.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)

	err = ioutil.WriteFile(
		path.Join(basePath, resource.Container.Name+".stdout.log"),
		[]byte(stdout.String()),
		0o644,
	)
	if err != nil {
		return err
	}

	err = ioutil.WriteFile(
		path.Join(basePath, resource.Container.Name+".stderr.log"),
		[]byte(stderr.String()),
		0o644,
	)
	if err != nil {
		return err
	}

	return nil
}

func (s *IntegrationTestSuite) tailscaleContainer(
	namespace, identifier, version string,
) (string, *dockertest.Resource) {
	tailscaleBuildOptions := &dockertest.BuildOptions{
		Dockerfile: "Dockerfile.tailscale",
		ContextDir: ".",
		BuildArgs: []docker.BuildArg{
			{
				Name:  "TAILSCALE_VERSION",
				Value: version,
			},
		},
	}
	hostname := fmt.Sprintf(
		"%s-tailscale-%s-%s",
		namespace,
		strings.Replace(version, ".", "-", -1),
		identifier,
	)
	tailscaleOptions := &dockertest.RunOptions{
		Name:     hostname,
		Networks: []*dockertest.Network{&s.network},
		Cmd: []string{
			"tailscaled", "--tun=tsdev",
		},
	}

	pts, err := s.pool.BuildAndRunWithBuildOptions(
		tailscaleBuildOptions,
		tailscaleOptions,
		DockerRestartPolicy,
		DockerAllowLocalIPv6,
		DockerAllowNetworkAdministration,
	)
	if err != nil {
		log.Fatalf("Could not start resource: %s", err)
	}
	fmt.Printf("Created %s container\n", hostname)

	return hostname, pts
}

func (s *IntegrationTestSuite) SetupSuite() {
	var err error
	app = Headscale{
		dbType:   "sqlite3",
		dbString: "integration_test_db.sqlite3",
	}

	if ppool, err := dockertest.NewPool(""); err == nil {
		s.pool = *ppool
	} else {
		log.Fatalf("Could not connect to docker: %s", err)
	}

	if pnetwork, err := s.pool.CreateNetwork("headscale-test"); err == nil {
		s.network = *pnetwork
	} else {
		log.Fatalf("Could not create network: %s", err)
	}

	headscaleBuildOptions := &dockertest.BuildOptions{
		Dockerfile: "Dockerfile",
		ContextDir: ".",
	}

	currentPath, err := os.Getwd()
	if err != nil {
		log.Fatalf("Could not determine current path: %s", err)
	}

	headscaleOptions := &dockertest.RunOptions{
		Name: "headscale",
		Mounts: []string{
			fmt.Sprintf("%s/integration_test/etc:/etc/headscale", currentPath),
		},
		Networks: []*dockertest.Network{&s.network},
		Cmd:      []string{"headscale", "serve"},
	}

	fmt.Println("Creating headscale container")
	if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
		s.headscale = *pheadscale
	} else {
		log.Fatalf("Could not start resource: %s", err)
	}
	fmt.Println("Created headscale container")

	fmt.Println("Creating tailscale containers")
	for namespace, scales := range s.namespaces {
		for i := 0; i < scales.count; i++ {
			version := tailscaleVersions[i%len(tailscaleVersions)]

			hostname, container := s.tailscaleContainer(
				namespace,
				fmt.Sprint(i),
				version,
			)
			scales.tailscales[hostname] = *container
		}
	}

	fmt.Println("Waiting for headscale to be ready")
	hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8080/tcp"))

	if err := s.pool.Retry(func() error {
		url := fmt.Sprintf("http://%s/health", hostEndpoint)

		resp, err := http.Get(url)
		if err != nil {
			return err
		}

		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("status code not OK")
		}

		return nil
	}); err != nil {
		// TODO(kradalby): If we cannot access headscale, or any other fatal error during
		// test setup, we need to abort and tear down. However, testify does not seem to
		// support that at the moment:
		// https://github.com/stretchr/testify/issues/849
		return // fmt.Errorf("Could not connect to headscale: %s", err)
	}
	fmt.Println("headscale container is ready")

	for namespace, scales := range s.namespaces {
		fmt.Printf("Creating headscale namespace: %s\n", namespace)
		result, err := ExecuteCommand(
			&s.headscale,
			[]string{"headscale", "namespaces", "create", namespace},
			[]string{},
		)
		fmt.Println("headscale create namespace result: ", result)
		assert.Nil(s.T(), err)

		fmt.Printf("Creating pre auth key for %s\n", namespace)
		preAuthResult, err := ExecuteCommand(
			&s.headscale,
			[]string{
				"headscale",
				"--namespace",
				namespace,
				"preauthkeys",
				"create",
				"--reusable",
				"--expiration",
				"24h",
				"--output",
				"json",
			},
			[]string{"LOG_LEVEL=error"},
		)
		assert.Nil(s.T(), err)

		var preAuthKey v1.PreAuthKey
		err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
		assert.Nil(s.T(), err)
		assert.True(s.T(), preAuthKey.Reusable)

		headscaleEndpoint := "http://headscale:8080"

		fmt.Printf(
			"Joining tailscale containers to headscale at %s\n",
			headscaleEndpoint,
		)
		for hostname, tailscale := range scales.tailscales {
			command := []string{
				"tailscale",
				"up",
				"-login-server",
				headscaleEndpoint,
				"--authkey",
				preAuthKey.Key,
				"--hostname",
				hostname,
			}

			fmt.Println("Join command:", command)
			fmt.Printf("Running join command for %s\n", hostname)
			result, err := ExecuteCommand(
				&tailscale,
				command,
				[]string{},
			)
			fmt.Println("tailscale result: ", result)
			assert.Nil(s.T(), err)
			fmt.Printf("%s joined\n", hostname)
		}
	}

	// The nodes need a bit of time to get their updated maps from headscale
	// TODO: See if we can have a more deterministic wait here.
	time.Sleep(60 * time.Second)
}

func (s *IntegrationTestSuite) TearDownSuite() {
}

func (s *IntegrationTestSuite) HandleStats(
	suiteName string,
	stats *suite.SuiteInformation,
) {
	s.stats = stats
}

func (s *IntegrationTestSuite) TestListNodes() {
	for namespace, scales := range s.namespaces {
		fmt.Println("Listing nodes")
		result, err := ExecuteCommand(
			&s.headscale,
			[]string{"headscale", "--namespace", namespace, "nodes", "list"},
			[]string{},
		)
		assert.Nil(s.T(), err)

		fmt.Printf("List nodes: \n%s\n", result)

		// Check that the correct number of hosts is present in the node list
		lines := strings.Split(result, "\n")
		assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)

		for hostname := range scales.tailscales {
			assert.Contains(s.T(), result, hostname)
		}
	}
}

func (s *IntegrationTestSuite) TestGetIpAddresses() {
	for _, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)

		for hostname := range scales.tailscales {
			ips := ips[hostname]
			for _, ip := range ips {
				s.T().Run(hostname, func(t *testing.T) {
					assert.NotNil(t, ip)

					fmt.Printf("IP for %s: %s\n", hostname, ip)

					// c.Assert(ip.Valid(), check.IsTrue)
					assert.True(t, ip.Is4() || ip.Is6())
					switch {
					case ip.Is4():
						assert.True(t, IpPrefix4.Contains(ip))
					case ip.Is6():
						assert.True(t, IpPrefix6.Contains(ip))
					}
				})
			}
		}
	}
}

// TODO(kradalby): fix this test
// We need some way to import ipnstate.Status from multiple go packages.
// Currently it will only work with 1.18.x since that is the last
// version we have in go.mod
// func (s *IntegrationTestSuite) TestStatus() {
// 	for _, scales := range s.namespaces {
// 		ips, err := getIPs(scales.tailscales)
// 		assert.Nil(s.T(), err)
//
// 		for hostname, tailscale := range scales.tailscales {
// 			s.T().Run(hostname, func(t *testing.T) {
// 				command := []string{"tailscale", "status", "--json"}
//
// 				fmt.Printf("Getting status for %s\n", hostname)
// 				result, err := ExecuteCommand(
// 					&tailscale,
// 					command,
// 					[]string{},
// 				)
// 				assert.Nil(t, err)
//
// 				var status ipnstate.Status
// 				err = json.Unmarshal([]byte(result), &status)
// 				assert.Nil(s.T(), err)
//
// 				// TODO(kradalby): Replace this check with peer length of SAME namespace
// 				// Check if we have as many nodes in status
// 				// as we have IPs/tailscales
// 				// lines := strings.Split(result, "\n")
// 				// assert.Equal(t, len(ips), len(lines)-1)
// 				// assert.Equal(t, len(scales.tailscales), len(lines)-1)
//
// 				peerIps := getIPsfromIPNstate(status)
//
// 				// Check that all hosts are present in every host's status
// 				for ipHostname, ip := range ips {
// 					if hostname != ipHostname {
// 						assert.Contains(t, peerIps, ip)
// 					}
// 				}
// 			})
// 		}
// 	}
// }

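For illustration only (not part of this diff): the core of what the commented-out TestStatus above would need, namely decoding `tailscale status --json` output into ipnstate.Status and collecting peer IPs. The jsonOutput argument is assumed to come from ExecuteCommand; the types mirror getIPsfromIPNstate below.

func peerIPsFromStatusJSON(jsonOutput string) ([]netaddr.IP, error) {
	var status ipnstate.Status
	if err := json.Unmarshal([]byte(jsonOutput), &status); err != nil {
		return nil, err
	}

	// Collect every peer's Tailscale IPs, as getIPsfromIPNstate does below.
	ips := make([]netaddr.IP, 0)
	for _, peer := range status.Peer {
		ips = append(ips, peer.TailscaleIPs...)
	}

	return ips, nil
}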
func getIPsfromIPNstate(status ipnstate.Status) []netaddr.IP {
	ips := make([]netaddr.IP, 0)

	for _, peer := range status.Peer {
		ips = append(ips, peer.TailscaleIPs...)
	}

	return ips
}

func (s *IntegrationTestSuite) TestPingAllPeersByAddress() {
	for _, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)

		for hostname, tailscale := range scales.tailscales {
			for peername, peerIPs := range ips {
				for i, ip := range peerIPs {
					// We currently can't ping ourselves, so skip that.
					if peername == hostname {
						continue
					}
					s.T().Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
						// We are only interested in a "direct ping", which means we
						// might need a couple more attempts before reaching the node.
						command := []string{
							"tailscale", "ping",
							"--timeout=1s",
							"--c=10",
							"--until-direct=true",
							ip.String(),
						}

						fmt.Printf(
							"Pinging from %s to %s (%s)\n",
							hostname,
							peername,
							ip,
						)
						result, err := ExecuteCommand(
							&tailscale,
							command,
							[]string{},
						)
						assert.Nil(t, err)
						fmt.Printf("Result for %s: %s\n", hostname, result)
						assert.Contains(t, result, "pong")
					})
				}
			}
		}
	}
}

func (s *IntegrationTestSuite) TestSharedNodes() {
	main := s.namespaces["main"]
	shared := s.namespaces["shared"]

	result, err := ExecuteCommand(
		&s.headscale,
		[]string{
			"headscale",
			"nodes",
			"list",
			"--output",
			"json",
			"--namespace",
			"shared",
		},
		[]string{},
	)
	assert.Nil(s.T(), err)

	var machineList []v1.Machine
	err = json.Unmarshal([]byte(result), &machineList)
	assert.Nil(s.T(), err)

	for _, machine := range machineList {
		result, err := ExecuteCommand(
			&s.headscale,
			[]string{
				"headscale",
				"nodes",
				"share",
				"--identifier", fmt.Sprint(machine.Id),
				"--namespace", "main",
			},
			[]string{},
		)
		assert.Nil(s.T(), err)

		fmt.Println("Shared node with result: ", result)
	}

	result, err = ExecuteCommand(
		&s.headscale,
		[]string{"headscale", "nodes", "list", "--namespace", "main"},
		[]string{},
	)
	assert.Nil(s.T(), err)
	fmt.Println("Nodelist after sharing", result)

	// Check that the correct number of hosts is present in the node list
	lines := strings.Split(result, "\n")
	assert.Equal(s.T(), len(main.tailscales)+len(shared.tailscales), len(lines)-2)

	for hostname := range main.tailscales {
		assert.Contains(s.T(), result, hostname)
	}

	for hostname := range shared.tailscales {
		assert.Contains(s.T(), result, hostname)
	}

	// TODO(juanfont): We have to find out why we need to wait
	time.Sleep(100 * time.Second) // Wait for the nodes to receive updates

	sharedIps, err := getIPs(shared.tailscales)
	assert.Nil(s.T(), err)

	for hostname, tailscale := range main.tailscales {
		for peername, peerIPs := range sharedIps {
			for i, ip := range peerIPs {
				// We currently can't ping ourselves, so skip that.
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s-%d", hostname, peername, i), func(t *testing.T) {
					// We are only interested in a "direct ping", which means we
					// might need a couple more attempts before reaching the node.
					command := []string{
						"tailscale", "ping",
						"--timeout=15s",
						"--c=20",
						"--until-direct=true",
						ip.String(),
					}

					fmt.Printf(
						"Pinging from %s to %s (%s)\n",
						hostname,
						peername,
						ip,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					fmt.Printf("Result for %s: %s\n", hostname, result)
					assert.Contains(t, result, "pong")
				})
			}
		}
	}
}

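For illustration only (not part of this diff): the list-then-share loop in TestSharedNodes above, lifted into a hypothetical helper. The CLI flags and JSON decoding match the test; the function itself does not exist in the diff.

func shareAllNodes(headscale *dockertest.Resource, fromNamespace, toNamespace string) error {
	// List the machines of fromNamespace as JSON so we can read their IDs.
	result, err := ExecuteCommand(
		headscale,
		[]string{
			"headscale", "nodes", "list",
			"--output", "json",
			"--namespace", fromNamespace,
		},
		[]string{},
	)
	if err != nil {
		return err
	}

	var machines []v1.Machine
	if err := json.Unmarshal([]byte(result), &machines); err != nil {
		return err
	}

	// Share every machine into toNamespace, as the test does one by one.
	for _, machine := range machines {
		if _, err := ExecuteCommand(
			headscale,
			[]string{
				"headscale", "nodes", "share",
				"--identifier", fmt.Sprint(machine.Id),
				"--namespace", toNamespace,
			},
			[]string{},
		); err != nil {
			return err
		}
	}

	return nil
}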
func (s *IntegrationTestSuite) TestTailDrop() {
	for _, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)

		retry := func(times int, sleepInterval time.Duration, doWork func() error) (err error) {
			for attempts := 0; attempts < times; attempts++ {
				err = doWork()
				if err == nil {
					return
				}
				time.Sleep(sleepInterval)
			}
			return
		}

		for hostname, tailscale := range scales.tailscales {
			command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", hostname)}
			_, err := ExecuteCommand(
				&tailscale,
				command,
				[]string{},
			)
			assert.Nil(s.T(), err)
			for peername := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"tailscale", "file", "cp",
						fmt.Sprintf("/tmp/file_from_%s", hostname),
						fmt.Sprintf("%s:", peername),
					}
					err := retry(10, 1*time.Second, func() error {
						fmt.Printf(
							"Sending file from %s to %s\n",
							hostname,
							peername,
						)
						_, err := ExecuteCommand(
							&tailscale,
							command,
							[]string{},
							ExecuteCommandTimeout(60*time.Second),
						)
						return err
					})
					assert.Nil(t, err)
				})
			}
		}

		for hostname, tailscale := range scales.tailscales {
			command := []string{
				"tailscale", "file",
				"get",
				"/tmp/",
			}
			_, err := ExecuteCommand(
				&tailscale,
				command,
				[]string{},
			)
			assert.Nil(s.T(), err)
			for peername, ip := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"ls",
						fmt.Sprintf("/tmp/file_from_%s", peername),
					}
					fmt.Printf(
						"Checking file in %s (%s) from %s (%s)\n",
						hostname,
						ips[hostname],
						peername,
						ip,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					fmt.Printf("Result for %s: %s\n", peername, result)
					assert.Equal(
						t,
						fmt.Sprintf("/tmp/file_from_%s\n", peername),
						result,
					)
				})
			}
		}
	}
}

func (s *IntegrationTestSuite) TestPingAllPeersByHostname() {
	for namespace, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)
		for hostname, tailscale := range scales.tailscales {
			for peername := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"tailscale", "ping",
						"--timeout=10s",
						"--c=20",
						"--until-direct=true",
						fmt.Sprintf("%s.%s.headscale.net", peername, namespace),
					}

					fmt.Printf(
						"Pinging using hostname from %s to %s\n",
						hostname,
						peername,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					fmt.Printf("Result for %s: %s\n", hostname, result)
					assert.Contains(t, result, "pong")
				})
			}
		}
	}
}

func (s *IntegrationTestSuite) TestMagicDNS() {
	for namespace, scales := range s.namespaces {
		ips, err := getIPs(scales.tailscales)
		assert.Nil(s.T(), err)
		for hostname, tailscale := range scales.tailscales {
			for peername, ips := range ips {
				if peername == hostname {
					continue
				}
				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
					command := []string{
						"tailscale", "ip",
						fmt.Sprintf("%s.%s.headscale.net", peername, namespace),
					}

					fmt.Printf(
						"Resolving name %s from %s\n",
						peername,
						hostname,
					)
					result, err := ExecuteCommand(
						&tailscale,
						command,
						[]string{},
					)
					assert.Nil(t, err)
					fmt.Printf("Result for %s: %s\n", hostname, result)

					for _, ip := range ips {
						assert.Contains(t, result, ip.String())
					}
				})
			}
		}
	}
}

func getIPs(tailscales map[string]dockertest.Resource) (map[string][]netaddr.IP, error) {
	ips := make(map[string][]netaddr.IP)
	for hostname, tailscale := range tailscales {
		command := []string{"tailscale", "ip"}

		result, err := ExecuteCommand(
			&tailscale,
			command,
			[]string{},
		)
		if err != nil {
			return nil, err
		}

		for _, address := range strings.Split(result, "\n") {
			address = strings.TrimSuffix(address, "\n")
			if len(address) < 1 {
				continue
			}
			ip, err := netaddr.ParseIP(address)
			if err != nil {
				return nil, err
			}
			ips[hostname] = append(ips[hostname], ip)
		}
	}

	return ips, nil
}

func getAPIURLs(
	tailscales map[string]dockertest.Resource,
) (map[netaddr.IP]string, error) {
	fts := make(map[netaddr.IP]string)
	for _, tailscale := range tailscales {
		command := []string{
			"curl",
			"--unix-socket",
			"/run/tailscale/tailscaled.sock",
			"http://localhost/localapi/v0/file-targets",
		}
		result, err := ExecuteCommand(
			&tailscale,
			command,
			[]string{},
		)
		if err != nil {
			return nil, err
		}

		var pft []apitype.FileTarget
		if err := json.Unmarshal([]byte(result), &pft); err != nil {
			return nil, fmt.Errorf("invalid JSON: %w", err)
		}
		for _, ft := range pft {
			n := ft.Node
			for _, a := range n.Addresses { // just add all the addresses
				if _, ok := fts[a.IP()]; !ok {
					if ft.PeerAPIURL == "" {
						return nil, errors.New("api url is empty")
					}
					fts[a.IP()] = ft.PeerAPIURL
				}
			}
		}
	}

	return fts, nil
}
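For illustration only (not part of this diff): the inline retry closure used in TestTailDrop above, written as a standalone helper. It is a sketch of the same pattern, not a function defined anywhere in the diff.

func retryWithInterval(times int, sleepInterval time.Duration, doWork func() error) error {
	var err error
	for attempt := 0; attempt < times; attempt++ {
		err = doWork()
		if err == nil {
			return nil
		}
		time.Sleep(sleepInterval)
	}

	return err
}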
Some files were not shown because too many files have changed in this diff.