mirror of https://github.com/juanfont/headscale.git (synced 2025-08-15 09:27:45 +00:00)

Compare commits: v0.22.0-al ... v0.23.0-al (261 commits)
Commits (abbreviated SHA1):

c4c8cfe5ea 40953727cf d4af0c386c 2ce23df45a 85cef84e17 7d62e9fce5 60f0cf908c 1704977e76
bf4fd078fc 58c94d2bd3 dd693c444c 2858ab402a 7bea885b8c 84de1854f8 6efc50789d 0fcfd643fa
bdf54e802e dbe32829a1 2fb7428ba9 8a8e25a8d1 4d9021047f 74ff14eb30 c1d4fef194 785b150467
20bf3777d3 c29eddded3 b477e5f366 95004de5e8 ef26f58085 1d3eae8861 a244eabd03 e15a08326c
c9966ba6c2 7a920ee701 8b2c31aabc 5dbd59ca55 3f162c212c 384ca03208 f581d4d9c0 b60ee9db54
c73e8476b9 6055d0b397 1904d79e90 1b01b9e14f 5717c8255a c42f25bd72 82c64f682c 7afc2fd180
5109af94a3 905fdaa409 0333e97630 e3553aae50 47405931c6 c4beb0b8af 3f2b238a46 68a8ecee7a
c3257e2146 9047c09871 91bb85e7d2 94b30abf56 00e7550e76 83769ba715 cbf57e27a7 4ea12f472a
b4210e2c90 a369d57a17 1e22f17f36 65376e2842 7e8bf4bfe5 3b103280ef a592ae56b4 054b06d45d
55ca078f22 6049ec758c ac910fd44c 9982ae5f09 cf8ffea154 790bbe5e8d 2c8fc9b061 b359939812
f65f4eca35 0153e26392 6c9c55774b 2f558bee80 4c608a4b58 f13cf64578 85e92db505 a59aab2081
b918aa03fc ed4e19996b c0fd06e3f5 2af71c9e31 42b7f8f65a 48c7d763d5 d0d6438337 fb4ed95ff6
01b85e5232 64c0a6523f 84fbca97f7 56cf4b082e 6cd0f77511 b27e8ab5a1 0030af3fa4 096ac31bb3
c957f893bd 217ccd6540 3bef63bb80 591ff8d347 14f8c1ba34 ca4a48afbb 9ccf87c566 4c12c02e71
2434d76ade 432e975a7f 387aa03adb 3b0749a320 a8079a2096 593b3ad981 e90a669951 9c5301ee2e
13a7285658 e55fe0671a e0ba325b3b eff529f2c5 a1a3ff4ba8 78268d78a0 f73172fb21 b7c6e0ec88
2d87085cbc 13fe4ec91b 53a9e28faf 4b65cf48d0 66ff1fcd40 056d3a81c5 7edc953d35 12a04f9459
1766e6b5df f8a58aa15b b4a4d0f760 63caf9a222 47255d267e e3acc95859 fb203a2e45 6567af7730
23a3adf8d2 665a3cc666 fe75b71620 19dc0ac702 155cc072f7 e2c08db3b5 fcdc7a6f7d 88ca2501d1
2675ff4b94 717abe89c1 161243c787 9c425a1c08 db6cf4ac0a 35770278f7 36c9b5ce74 0562260fe0
c1218ad3c2 d36336a572 80ea87c032 8c4c4c8633 2289a2acbf c72401a99b 725bbd7408 084d1d5d6e
f9f6e1557a 5bad48a24e bce8427423 f7f472ae07 699655a93f feb15365b5 14e29a7bee b01f1f1867
c027ef0f6c db97a7ab10 252342a0a5 cdf3c47d63 61a2915f17 a16f0c9f60 52ad138c32 d2413d0a2f
51dc0d5784 2d365c8c9c f2c1d1b8f9 2d6356fa13 3bfc598ccc 3683d3e82f 4a7921ead5 22e397e0b6
c7db99d6ca f73354b4f4 4c8f8c6a1c 997e93455d 9f381256c4 f60c5a1398 5706f84cb0 9478c288f6
6043ec87cf dcf2439c61 ba45d7dbd3 bab4e14828 526e568e1e 02ab0df2de 7338775de7 00c514608e
6c5723a463 57fd5cf310 f113cc7846 ca54fb9f56 735b185e7f 1a7ae11697 644be822d5 56b63c6e10
ccedf276ab 10320a5f1f ecd62fb785 0d24e878d0 889d5a1b29 1700a747f6 200e3b88cc 5bbbe437df
6de53e2f8d b23a9153df 80772033ee a2b760834f 493bcfcf18 df72508089 0f8d8fc2d8 744e5a11b6
3ea1750ea0 a45777d22e 56dd734300 d0113732fe 6215eb6471 1d2b4bca8a 96f9680afd b465592c07
991ff25362 eacd687dbf 549f5a164d bb07aec82c a5afe4bd06 a71cc81fe7 679305c3e4 c0680f34f1
64ebe6b0c8 e6b26499f7 977eb1dee3 b2e2b02210 2abff4bb08 54c00645d1 cad5ce0ebd b12a167fa2
667295e15e bea52678e3 307cfc3304 5e74ca9414 9836b097a4

.github/ISSUE_TEMPLATE/bug_report.md (vendored, 49 changed lines)

@@ -6,19 +6,24 @@ labels: ["bug"]
assignees: ""
---

<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the bug report in this language. -->
<!--
Before posting a bug report, discuss the behaviour you are expecting with the Discord community
to make sure that it is truly a bug.
The issue tracker is not the place to ask for support or how to set up Headscale.

**Bug description**
Bug reports without the sufficient information will be closed.

Headscale is a multinational community across the globe. Our language is English.
All bug reports needs to be in English.
-->

## Bug description

<!-- A clear and concise description of what the bug is. Describe the expected bahavior
and how it is currently different. If you are unsure if it is a bug, consider discussing
it on our Discord server first. -->

**To Reproduce**

<!-- Steps to reproduce the behavior. -->

**Context info**
## Environment

<!-- Please add relevant information about your system. For example:
- Version of headscale used
@@ -28,3 +33,33 @@ assignees: ""
- The relevant config parameters you used
- Log output
-->

- OS:
- Headscale version:
- Tailscale version:

<!--
We do not support running Headscale in a container nor behind a (reverse) proxy.
If either of these are true for your environment, ask the community in Discord
instead of filing a bug report.
-->

- [ ] Headscale is behind a (reverse) proxy
- [ ] Headscale runs in a container

## To Reproduce

<!-- Steps to reproduce the behavior. -->

## Logs and attachments

<!-- Please attach files with:
- Client netmap dump (see below)
- ACL configuration
- Headscale configuration

Dump the netmap of tailscale clients:
`tailscale debug netmap > DESCRIPTIVE_NAME.json`

Please provide information describing the netmap, which client, which headscale version etc.
-->

.github/ISSUE_TEMPLATE/feature_request.md (vendored, 17 changed lines)

@@ -6,12 +6,21 @@ labels: ["enhancement"]
assignees: ""
---

<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the feature request in this language. -->
<!--
We typically have a clear roadmap for what we want to improve and reserve the right
to close feature requests that does not fit in the roadmap, or fit with the scope
of the project, or we actually want to implement ourselves.

**Feature request**
Headscale is a multinational community across the globe. Our language is English.
All bug reports needs to be in English.
-->

<!-- A clear and precise description of what new or changed feature you want. -->
## Why

<!-- Please include the reason, why you would need the feature. E.g. what problem
<!-- Include the reason, why you would need the feature. E.g. what problem
does it solve? Or which workflow is currently frustrating and will be improved by
this? -->

## Description

<!-- A clear and precise description of what new or changed feature you want. -->

.github/ISSUE_TEMPLATE/other_issue.md (vendored, deleted, 30 lines)

@@ -1,30 +0,0 @@
---
name: "Other issue"
about: "Report a different issue"
title: ""
labels: ["bug"]
assignees: ""
---

<!-- Headscale is a multinational community across the globe. Our common language is English. Please consider raising the issue in this language. -->

<!-- If you have a question, please consider using our Discord for asking questions -->

**Issue description**

<!-- Please add your issue description. -->

**To Reproduce**

<!-- Steps to reproduce the behavior. -->

**Context info**

<!-- Please add relevant information about your system. For example:
- Version of headscale used
- Version of tailscale client
- OS (e.g. Linux, Mac, Cygwin, WSL, etc.) and version
- Kernel version
- The relevant config parameters you used
- Log output
-->

.github/pull_request_template.md (vendored, 12 changed lines)

@@ -1,3 +1,15 @@
<!--
Headscale is "Open Source, acknowledged contribution", this means that any
contribution will have to be discussed with the Maintainers before being submitted.

This model has been chosen to reduce the risk of burnout by limiting the
maintenance overhead of reviewing and validating third-party code.

Headscale is open to code contributions for bug fixes without discussion.

If you find mistakes in the documentation, please submit a fix to the documentation.
-->

<!-- Please tick if the following things apply. You… -->

- [ ] read the [CONTRIBUTING guidelines](README.md#contributing)

.github/renovate.json (vendored, 26 changed lines)

@@ -6,31 +6,27 @@
"onboarding": false,
"extends": ["config:base", ":rebaseStalePrs"],
"ignorePresets": [":prHourlyLimit2"],
"enabledManagers": ["dockerfile", "gomod", "github-actions","regex" ],
"enabledManagers": ["dockerfile", "gomod", "github-actions", "regex"],
"includeForks": true,
"repositories": ["juanfont/headscale"],
"platform": "github",
"packageRules": [
{
"matchDatasources": ["go"],
"groupName": "Go modules",
"groupSlug": "gomod",
"separateMajorMinor": false
"matchDatasources": ["go"],
"groupName": "Go modules",
"groupSlug": "gomod",
"separateMajorMinor": false
},
{
"matchDatasources": ["docker"],
"groupName": "Dockerfiles",
"groupSlug": "dockerfiles"
}
"matchDatasources": ["docker"],
"groupName": "Dockerfiles",
"groupSlug": "dockerfiles"
}
],
"regexManagers": [
{
"fileMatch": [
".github/workflows/.*.yml$"
],
"matchStrings": [
"\\s*go-version:\\s*\"?(?<currentValue>.*?)\"?\\n"
],
"fileMatch": [".github/workflows/.*.yml$"],
"matchStrings": ["\\s*go-version:\\s*\"?(?<currentValue>.*?)\"?\\n"],
"datasourceTemplate": "golang-version",
"depNameTemplate": "actions/go-version"
}

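For readability, the JSON-escaped matchStrings pattern above can be exercised outside Renovate. A minimal Go sketch (the workflow fragment is a made-up example, and Go spells the named group (?P<currentValue>...) where the Renovate config uses (?<currentValue>...)):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as in "matchStrings", written with Go's named-group syntax.
	pattern := regexp.MustCompile(`\s*go-version:\s*"?(?P<currentValue>.*?)"?\n`)

	// Hypothetical workflow fragment; any go-version line would match.
	workflow := "      with:\n        go-version: \"1.20\"\n"

	match := pattern.FindStringSubmatch(workflow)
	fmt.Println(match[pattern.SubexpIndex("currentValue")]) // prints: 1.20
}
```

Renovate then checks the captured value against the golang-version datasource named in datasourceTemplate and proposes an update when a newer release exists.
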
.github/workflows/build.yml (vendored, 32 changed lines)

@@ -16,29 +16,29 @@ jobs:
build:
runs-on: ubuntu-latest
permissions: write-all

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
uses: dorny/paths-filter@v3
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'

- name: Run build
id: build
if: steps.changed-files.outputs.any_changed == 'true'
if: steps.changed-files.outputs.files == 'true'
run: |
nix build |& tee build-result
BUILD_STATUS="${PIPESTATUS[0]}"
@@ -64,8 +64,8 @@ jobs:
body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'
})

- uses: actions/upload-artifact@v3
if: steps.changed-files.outputs.any_changed == 'true'
- uses: actions/upload-artifact@v4
if: steps.changed-files.outputs.files == 'true'
with:
name: headscale-linux
path: result/bin/headscale

.github/workflows/check-tests.yaml (vendored, new file, 41 lines)

@@ -0,0 +1,41 @@
name: Check integration tests workflow

on: [pull_request]

concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
check-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@v3
with:
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'

- name: Generate and check integration tests
if: steps.changed-files.outputs.files == 'true'
run: |
nix develop --command bash -c "cd cmd/gh-action-integration-generator/ && go generate"
git diff --exit-code .github/workflows/test-integration.yaml

- name: Show missing tests
if: failure()
run: |
git diff .github/workflows/test-integration.yaml

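The check above relies on regeneration being deterministic: go generate rewrites .github/workflows/test-integration.yaml from the list of integration tests, so git diff --exit-code fails whenever the committed file is stale. A rough, hypothetical sketch of that shape (not the actual cmd/gh-action-integration-generator code, which this diff does not include):

```go
// Hypothetical sketch of a generate-then-diff style generator; the real
// cmd/gh-action-integration-generator discovers the Test* functions itself.
package main

import (
	"fmt"
	"os"
	"strings"
)

//go:generate go run .

func main() {
	// Illustrative list; the real generator derives this from the test code.
	tests := []string{"TestPingAllByIP", "TestEphemeral"}

	var b strings.Builder
	b.WriteString("# DO NOT EDIT, generated file\n")
	for _, name := range tests {
		fmt.Fprintf(&b, "      - run: go test ./... -run \"^%s$\"\n", name)
	}

	// Committing the regenerated file keeps "git diff --exit-code" clean.
	if err := os.WriteFile("../../.github/workflows/test-integration.yaml", []byte(b.String()), 0o644); err != nil {
		panic(err)
	}
}
```
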
.github/workflows/contributors.yml (vendored, 3 changed lines)

@@ -5,11 +5,12 @@ on:
branches:
- main
workflow_dispatch:

jobs:
add-contributors:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Delete upstream contributor branch
# Allow continue on failure to account for when the
# upstream branch is deleted or does not exist.

.github/workflows/docs.yml (vendored, 15 changed lines)

@@ -1,4 +1,5 @@
name: Build documentation

on:
push:
branches:
@@ -15,7 +16,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install python
uses: actions/setup-python@v4
with:
@@ -26,20 +27,26 @@ jobs:
key: ${{ github.ref }}
path: .cache
- name: Setup dependencies
run: pip install mkdocs-material pillow cairosvg mkdocs-minify-plugin
run: pip install -r docs/requirements.txt
- name: Build docs
run: mkdocs build --strict
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
uses: actions/upload-pages-artifact@v3
with:
path: ./site

deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
permissions:
pages: write
id-token: write
runs-on: ubuntu-latest
needs: build
steps:
- name: Configure Pages
uses: actions/configure-pages@v4
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1
uses: actions/deploy-pages@v4

.github/workflows/gh-actions-updater.yaml (vendored, 5 changed lines)

@@ -1,6 +1,5 @@
name: GitHub Actions Version Updater

# Controls when the action will run.
on:
schedule:
# Automatically run on every Sunday
@@ -11,13 +10,13 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}

- name: Run GitHub Actions Version Updater
uses: saadmk11/github-actions-version-updater@v0.7.1
uses: saadmk11/github-actions-version-updater@v0.8.1
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}

.github/workflows/lint.yml (vendored, 89 changed lines)

@@ -1,7 +1,6 @@
---
name: Lint

on: [push, pull_request]
on: [pull_request]

concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
@@ -11,70 +10,66 @@ jobs:
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
uses: dorny/paths-filter@v3
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'

- name: golangci-lint
if: steps.changed-files.outputs.any_changed == 'true'
uses: golangci/golangci-lint-action@v2
with:
version: v1.51.2

# Only block PRs on new problems.
# If this is not enabled, we will end up having PRs
# blocked because new linters has appared and other
# parts of the code is affected.
only-new-issues: true
if: steps.changed-files.outputs.files == 'true'
run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --out-format=github-actions .

prettier-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v14.1
uses: dorny/paths-filter@v3
with:
files: |
*.nix
**/*.md
**/*.yml
**/*.yaml
**/*.ts
**/*.js
**/*.sass
**/*.css
**/*.scss
**/*.html
filters: |
files:
- '*.nix'
- '**/*.md'
- '**/*.yml'
- '**/*.yaml'
- '**/*.ts'
- '**/*.js'
- '**/*.sass'
- '**/*.css'
- '**/*.scss'
- '**/*.html'
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'

- name: Prettify code
if: steps.changed-files.outputs.any_changed == 'true'
uses: creyD/prettier_action@v4.3
with:
prettier_options: >-
--check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}
only_changed: false
dry: true
if: steps.changed-files.outputs.files == 'true'
run: nix develop --command -- prettier --no-error-on-unmatched-pattern --ignore-unknown --check **/*.{ts,js,md,yaml,yml,sass,css,scss,html}

proto-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: bufbuild/buf-setup-action@v1.7.0
- uses: bufbuild/buf-lint-action@v1
with:
input: "proto"
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main

- name: Buf lint
run: nix develop --command -- buf lint proto

.github/workflows/release-docker.yml (vendored, deleted, 138 lines)

@@ -1,138 +0,0 @@
---
name: Release Docker

on:
push:
tags:
- "*" # triggers only if push new tag version
workflow_dispatch:

jobs:
docker-release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Set up QEMU for multiple platforms
uses: docker/setup-qemu-action@master
with:
platforms: arm64,amd64
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Docker meta
id: meta
uses: docker/metadata-action@v3
with:
# list of Docker images to use as base name for tags
images: |
${{ secrets.DOCKERHUB_USERNAME }}/headscale
ghcr.io/${{ github.repository_owner }}/headscale
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=sha
type=raw,value=develop
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
push: true
context: .
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache-new
build-args: |
VERSION=${{ steps.meta.outputs.version }}
- name: Prepare cache for next build
run: |
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache

docker-debug-release:
The deleted file's second job repeats docker-release with a debug flavour: the layer cache lives under /tmp/.buildx-cache-debug (key ${{ runner.os }}-buildx-debug-${{ github.sha }}, with a matching -new path rotated into place by the final step), the metadata step id is meta-debug and adds flavor suffix=-debug,onlatest=true, and the build step uses file: Dockerfile.debug with tags, labels, and VERSION taken from the meta-debug outputs.

.github/workflows/release.yml (vendored, 20 changed lines)

@@ -12,13 +12,27 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0

- uses: cachix/install-nix-action@v16
- name: Login to DockerHub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main

- name: Run goreleaser
run: nix develop --command -- goreleaser release --rm-dist
run: nix develop --command -- goreleaser release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/stale.yml (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
name: Close inactive issues

on:
schedule:
- cron: "30 1 * * *"

jobs:
close-issues:
runs-on: ubuntu-latest
permissions:
issues: write
pull-requests: write
steps:
- uses: actions/stale@v9
with:
days-before-issue-stale: 90
days-before-issue-close: 7
stale-issue-label: "stale"
stale-issue-message: "This issue is stale because it has been open for 90 days with no activity."
close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale."
days-before-pr-stale: -1
days-before-pr-close: -1
repo-token: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/test-integration-cli.yml (vendored, deleted, 35 lines)

@@ -1,35 +0,0 @@
name: Integration Test CLI

on: [pull_request]

jobs:
integration-test-cli:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 10

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'

- name: Run CLI integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: nix develop --command -- make test_integration_cli

.github/workflows/test-integration-derp.yml (vendored, deleted, 35 lines)

@@ -1,35 +0,0 @@
This deleted file is identical to test-integration-cli.yml above except for the workflow name (Integration Test DERP), the job id (integration-test-derp), and the final step, named "Run Embedded DERP server integration tests", which runs: nix develop --command -- make test_integration_derp

@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestACLAllowStarDst

on: [pull_request]

concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true

jobs:
test:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml

- uses: cachix/install-nix-action@v18
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

- name: Run general integration tests
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
golang:1 \
go test ./... \
-tags ts2019 \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestACLAllowStarDst$"

- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'
with:
name: logs
path: "control_logs/*.log"

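The anchored -run pattern in the deleted workflow selects exactly one top-level test function by name; without the ^ and $ anchors, any test whose name merely contains the string would also run. For reference, the shape of the function such a job targets (placeholder body, not headscale's actual test code):

```go
package integration

import "testing"

// `go test -run "^TestACLAllowStarDst$"` runs only this top-level function;
// its subtests would be addressed as TestACLAllowStarDst/<name>.
func TestACLAllowStarDst(t *testing.T) {
	t.Log("placeholder; the real test drives headscale and tailscale containers")
}
```
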
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestACLAllowUser80Dst
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestACLAllowUser80Dst$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestACLAllowUserDst
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestACLAllowUserDst$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestACLDenyAllPort80
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestACLDenyAllPort80$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestACLHostsInNetMapTable
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestACLHostsInNetMapTable$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestAuthKeyLogoutAndRelogin
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestAuthKeyLogoutAndRelogin$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestAuthWebFlowAuthenticationPingAll
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestAuthWebFlowAuthenticationPingAll$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestAuthWebFlowLogoutAndRelogin
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestAuthWebFlowLogoutAndRelogin$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestCreateTailscale
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestCreateTailscale$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestEnablingRoutes
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestEnablingRoutes$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestEphemeral
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestEphemeral$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestExpireNode
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestExpireNode$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestHeadscale
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestHeadscale$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
|
||||
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
|
||||
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
|
||||
|
||||
name: Integration Test v2 - TestOIDCAuthenticationPingAll
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Get changed files
|
||||
id: changed-files
|
||||
uses: tj-actions/changed-files@v34
|
||||
with:
|
||||
files: |
|
||||
*.nix
|
||||
go.*
|
||||
**/*.go
|
||||
integration_test/
|
||||
config-example.yaml
|
||||
|
||||
- uses: cachix/install-nix-action@v18
|
||||
if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'
|
||||
|
||||
- name: Run general integration tests
|
||||
if: steps.changed-files.outputs.any_changed == 'true'
|
||||
run: |
|
||||
nix develop --command -- docker run \
|
||||
--tty --rm \
|
||||
--volume ~/.cache/hs-integration-go:/go \
|
||||
--name headscale-test-suite \
|
||||
--volume $PWD:$PWD -w $PWD/integration \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume $PWD/control_logs:/tmp/control \
|
||||
golang:1 \
|
||||
go test ./... \
|
||||
-tags ts2019 \
|
||||
-failfast \
|
||||
-timeout 120m \
|
||||
-parallel 1 \
|
||||
-run "^TestOIDCAuthenticationPingAll$"
|
||||
|
||||
- uses: actions/upload-artifact@v3
|
||||
if: always() && steps.changed-files.outputs.any_changed == 'true'
|
||||
with:
|
||||
name: logs
|
||||
path: "control_logs/*.log"
|
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestOIDCExpireNodesBasedOnTokenExpiry

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestOIDCExpireNodesBasedOnTokenExpiry$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestPingAllByHostname

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestPingAllByHostname$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestPingAllByIP

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestPingAllByIP$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestPreAuthKeyCommand

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestPreAuthKeyCommand$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestPreAuthKeyCommandReusableEphemeral

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestPreAuthKeyCommandReusableEphemeral$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestPreAuthKeyCommandWithoutExpiry

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestPreAuthKeyCommandWithoutExpiry$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestResolveMagicDNS

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestResolveMagicDNS$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestSSHIsBlockedInACL

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestSSHIsBlockedInACL$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestSSHMultipleUsersAllToAll

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestSSHMultipleUsersAllToAll$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestSSHNoSSHConfigured

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestSSHNoSSHConfigured$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestSSHOneUserAllToAll

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestSSHOneUserAllToAll$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestSSUserOnlyIsolation

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestSSUserOnlyIsolation$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestTaildrop

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestTaildrop$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestTailscaleNodesJoiningHeadcale

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestTailscaleNodesJoiningHeadcale$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
@@ -1,57 +0,0 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - TestUserCommand

on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: ${{ env.ACT }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
          --tty --rm \
          --volume ~/.cache/hs-integration-go:/go \
          --name headscale-test-suite \
          --volume $PWD:$PWD -w $PWD/integration \
          --volume /var/run/docker.sock:/var/run/docker.sock \
          --volume $PWD/control_logs:/tmp/control \
          golang:1 \
          go test ./... \
          -tags ts2019 \
          -failfast \
          -timeout 120m \
          -parallel 1 \
          -run "^TestUserCommand$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
114 .github/workflows/test-integration.yaml vendored Normal file
@@ -0,0 +1,114 @@
name: Integration Tests
on: [pull_request]
concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
jobs:
  integration-test:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        test:
          - TestACLHostsInNetMapTable
          - TestACLAllowUser80Dst
          - TestACLDenyAllPort80
          - TestACLAllowUserDst
          - TestACLAllowStarDst
          - TestACLNamedHostsCanReachBySubnet
          - TestACLNamedHostsCanReach
          - TestACLDevice1CanAccessDevice2
          - TestOIDCAuthenticationPingAll
          - TestOIDCExpireNodesBasedOnTokenExpiry
          - TestAuthWebFlowAuthenticationPingAll
          - TestAuthWebFlowLogoutAndRelogin
          - TestUserCommand
          - TestPreAuthKeyCommand
          - TestPreAuthKeyCommandWithoutExpiry
          - TestPreAuthKeyCommandReusableEphemeral
          - TestApiKeyCommand
          - TestNodeTagCommand
          - TestNodeAdvertiseTagNoACLCommand
          - TestNodeAdvertiseTagWithACLCommand
          - TestNodeCommand
          - TestNodeExpireCommand
          - TestNodeRenameCommand
          - TestNodeMoveCommand
          - TestDERPServerScenario
          - TestPingAllByIP
          - TestPingAllByIPPublicDERP
          - TestAuthKeyLogoutAndRelogin
          - TestEphemeral
          - TestPingAllByHostname
          - TestTaildrop
          - TestResolveMagicDNS
          - TestExpireNode
          - TestNodeOnlineStatus
          - TestPingAllByIPManyUpDown
          - TestEnablingRoutes
          - TestHASubnetRouterFailover
          - TestEnableDisableAutoApprovedRoute
          - TestSubnetRouteACL
          - TestHeadscale
          - TestCreateTailscale
          - TestTailscaleNodesJoiningHeadcale
          - TestSSHOneUserToAll
          - TestSSHMultipleUsersAllToAll
          - TestSSHNoSSHConfigured
          - TestSSHIsBlockedInACL
          - TestSSHUserOnlyIsolation
        database: [postgres, sqlite]
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 2
      - name: Get changed files
        id: changed-files
        uses: dorny/paths-filter@v3
        with:
          filters: |
            files:
              - '*.nix'
              - 'go.*'
              - '**/*.go'
              - 'integration_test/'
              - 'config-example.yaml'
      - uses: DeterminateSystems/nix-installer-action@main
        if: steps.changed-files.outputs.files == 'true'
      - uses: DeterminateSystems/magic-nix-cache-action@main
        if: steps.changed-files.outputs.files == 'true'
      - uses: satackey/action-docker-layer-caching@main
        if: steps.changed-files.outputs.files == 'true'
        continue-on-error: true
      - name: Run Integration Test
        uses: Wandalen/wretry.action@master
        if: steps.changed-files.outputs.files == 'true'
        env:
          USE_POSTGRES: ${{ matrix.database == 'postgres' && '1' || '0' }}
        with:
          attempt_limit: 5
          command: |
            nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            --env HEADSCALE_INTEGRATION_POSTGRES=${{env.USE_POSTGRES}} \
            golang:1 \
            go run gotest.tools/gotestsum@latest -- ./... \
            -failfast \
            -timeout 120m \
            -parallel 1 \
            -run "^${{ matrix.test }}$"
      - uses: actions/upload-artifact@v4
        if: always() && steps.changed-files.outputs.files == 'true'
        with:
          name: ${{ matrix.test }}-${{matrix.database}}-logs
          path: "control_logs/*.log"
      - uses: actions/upload-artifact@v4
        if: always() && steps.changed-files.outputs.files == 'true'
        with:
          name: ${{ matrix.test }}-${{matrix.database}}-pprof
          path: "control_logs/*.pprof.tar"
25 .github/workflows/test.yml vendored
@@ -11,24 +11,27 @@ jobs:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 2

- name: Get changed files
id: changed-files
uses: tj-actions/changed-files@v34
uses: dorny/paths-filter@v3
with:
files: |
*.nix
go.*
**/*.go
integration_test/
config-example.yaml
filters: |
files:
- '*.nix'
- 'go.*'
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'

- uses: cachix/install-nix-action@v16
if: steps.changed-files.outputs.any_changed == 'true'
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'

- name: Run tests
if: steps.changed-files.outputs.any_changed == 'true'
if: steps.changed-files.outputs.files == 'true'
run: nix develop --check
18 .github/workflows/update-flake.yml vendored Normal file
@@ -0,0 +1,18 @@
name: update-flake-lock
on:
  workflow_dispatch: # allows manual triggering
  schedule:
    - cron: "0 0 * * 0" # runs weekly on Sunday at 00:00

jobs:
  lockfile:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@main
      - name: Update flake.lock
        uses: DeterminateSystems/update-flake-lock@main
        with:
          pr-title: "Update flake.lock"
8 .gitignore vendored
@@ -1,3 +1,7 @@
ignored/
tailscale/
.vscode/

# Binaries for programs and plugins
*.exe
*.exe~
@@ -12,7 +16,7 @@
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
vendor/

dist/
/headscale
@@ -39,3 +43,5 @@ integration_test/etc/config.dump.yaml
# MkDocs
.cache
/site

__debug_bin
@@ -10,6 +10,8 @@ issues:
linters:
enable-all: true
disable:
- depguard

- exhaustivestruct
- revive
- lll
@@ -30,6 +32,7 @@ linters:
- exhaustruct
- nolintlint
- musttag # causes issues with imported libs
- depguard

# deprecated
- structcheck # replaced by unused
129 .goreleaser.yml
@@ -1,14 +1,15 @@
---
before:
  hooks:
    - go mod tidy -compat=1.20
    - go mod tidy -compat=1.22
    - go mod vendor

release:
  prerelease: auto

builds:
  - id: headscale
    main: ./cmd/headscale/headscale.go
    main: ./cmd/headscale
    mod_timestamp: "{{ .CommitTimestamp }}"
    env:
      - CGO_ENABLED=0
@@ -31,19 +32,16 @@ builds:

archives:
  - id: golang-cross
    builds:
      - darwin-amd64
      - darwin-arm64
      - freebsd-amd64
      - linux-386
      - linux-amd64
      - linux-arm64
      - linux-arm-5
      - linux-arm-6
      - linux-arm-7
    name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
    name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
    format: binary

source:
  enabled: true
  name_template: "{{ .ProjectName }}_{{ .Version }}"
  format: tar.gz
  files:
    - "vendor/"

nfpms:
  # Configure nFPM for .deb and .rpm releases
  #
@@ -65,7 +63,6 @@ nfpms:
    bindir: /usr/bin
    formats:
      - deb
      - rpm
    contents:
      - src: ./config-example.yaml
        dst: /etc/headscale/config.yaml
@@ -73,7 +70,7 @@ nfpms:
        file_info:
          mode: 0644
      - src: ./docs/packaging/headscale.systemd.service
        dst: /etc/systemd/system/headscale.service
        dst: /usr/lib/systemd/system/headscale.service
      - dst: /var/lib/headscale
        type: dir
      - dst: /var/run/headscale
@@ -82,6 +79,108 @@ nfpms:
    postinstall: ./docs/packaging/postinstall.sh
    postremove: ./docs/packaging/postremove.sh

kos:
  - id: ghcr
    repository: ghcr.io/juanfont/headscale

    # bare tells KO to only use the repository
    # for tagging and naming the container.
    bare: true
    base_image: gcr.io/distroless/base-debian12
    build: headscale
    main: ./cmd/headscale
    env:
      - CGO_ENABLED=0
    platforms:
      - linux/amd64
      - linux/386
      - linux/arm64
      - linux/arm/v7
    tags:
      - "{{ if not .Prerelease }}latest{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}{{ end }}"
      - "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}"
      - "{{ .Tag }}"
      - '{{ trimprefix .Tag "v" }}'
      - "sha-{{ .ShortCommit }}"

  - id: dockerhub
    build: headscale
    base_image: gcr.io/distroless/base-debian12
    repository: headscale/headscale
    bare: true
    platforms:
      - linux/amd64
      - linux/386
      - linux/arm64
      - linux/arm/v7
    tags:
      - "{{ if not .Prerelease }}latest{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}{{ end }}"
      - "{{ if not .Prerelease }}stable{{ else }}unstable{{ end }}"
      - "{{ .Tag }}"
      - '{{ trimprefix .Tag "v" }}'
      - "sha-{{ .ShortCommit }}"

  - id: ghcr-debug
    repository: ghcr.io/juanfont/headscale
    bare: true
    base_image: "debian:12"
    build: headscale
    main: ./cmd/headscale
    env:
      - CGO_ENABLED=0
    platforms:
      - linux/amd64
      - linux/386
      - linux/arm64
      - linux/arm/v7
    tags:
      - "{{ if not .Prerelease }}latest-debug{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}-debug{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}-debug{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}-debug{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}-debug{{ end }}"
      - "{{ if not .Prerelease }}stable{{ else }}unstable-debug{{ end }}"
      - "{{ .Tag }}-debug"
      - '{{ trimprefix .Tag "v" }}-debug'
      - "sha-{{ .ShortCommit }}-debug"

  - id: dockerhub-debug
    build: headscale
    base_image: "debian:12"
    repository: headscale/headscale
    bare: true
    platforms:
      - linux/amd64
      - linux/386
      - linux/arm64
      - linux/arm/v7
    tags:
      - "{{ if not .Prerelease }}latest-debug{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}.{{ .Minor }}-debug{{ end }}"
      - "{{ if not .Prerelease }}{{ .Major }}-debug{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}.{{ .Patch }}-debug{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}.{{ .Minor }}-debug{{ end }}"
      - "{{ if not .Prerelease }}v{{ .Major }}-debug{{ end }}"
      - "{{ if not .Prerelease }}stable{{ else }}unstable-debug{{ end }}"
      - "{{ .Tag }}-debug"
      - '{{ trimprefix .Tag "v" }}-debug'
      - "sha-{{ .ShortCommit }}-debug"

checksum:
  name_template: "checksums.txt"
snapshot:
@@ -1 +1,6 @@
.github/workflows/test-integration-v2*
docs/dns-records.md
docs/running-headscale-container.md
docs/running-headscale-linux-manual.md
docs/running-headscale-linux.md
docs/running-headscale-openbsd.md
87 CHANGELOG.md
@@ -1,12 +1,95 @@
# CHANGELOG

## 0.22.0 (2023-XX-XX)
## 0.23.0 (2023-XX-XX)

This release is mainly a code reorganisation and refactoring, significantly improving the maintainability of the codebase. This should allow us to improve further and make it easier for the maintainers to keep on top of the project.

**Please remember to always back up your database between versions**

#### Here is a short summary of the broad topics of changes:

Code has been organised into modules, reducing use of global variables/objects, isolating concerns and “putting the right things in the logical place”.

The new [policy](https://github.com/juanfont/headscale/tree/main/hscontrol/policy) and [mapper](https://github.com/juanfont/headscale/tree/main/hscontrol/mapper) packages, containing the ACL/Policy logic and the logic for creating the data served to clients (the network “map”), have been rewritten and improved. This change has allowed us to finish SSH support and add additional tests throughout the code to ensure correctness.

The [“poller”, or streaming logic](https://github.com/juanfont/headscale/blob/main/hscontrol/poll.go) has been rewritten: instead of keeping track of the latest updates and checking at a fixed interval, it now uses Go channels, implemented in our new [notifier](https://github.com/juanfont/headscale/tree/main/hscontrol/notifier) package, which allows us to send updates to connected clients immediately. This should improve both performance and the latency before a client picks up an update.

Headscale now supports sending “delta” updates, thanks to the new mapper and poller logic, allowing us to only inform nodes about new nodes, changed nodes and removed nodes. Previously we sent the entire state of the network every time an update was due.

While we have a pretty good [test harness](https://github.com/search?q=repo%3Ajuanfont%2Fheadscale+path%3A_test.go&type=code) for validating our changes, we have rewritten over [10000 lines of code](https://github.com/juanfont/headscale/compare/b01f1f1867136d9b2d7b1392776eb363b482c525...main) and bugs are expected. We need help testing this release. In addition, while we think the performance should in general be better, there might be regressions in parts of the platform, particularly where we prioritised correctness over speed.

There are also several bugfixes that have been encountered and fixed as part of implementing these changes, particularly after improving the test harness as part of adopting [#1460](https://github.com/juanfont/headscale/pull/1460).

### BREAKING

- Code reorganisation, a lot of code has moved, please review the following PRs accordingly [#1473](https://github.com/juanfont/headscale/pull/1473)
- Change the structure of database configuration, see [config-example.yaml](./config-example.yaml) for the new structure. [#1700](https://github.com/juanfont/headscale/pull/1700)
  - The old structure has been removed and the configuration _must_ be converted.
  - Adds additional PostgreSQL configuration for setting max open connections, idle connections and idle connection lifetime.
- API: Machine is now Node [#1553](https://github.com/juanfont/headscale/pull/1553)
- Remove support for older Tailscale clients [#1611](https://github.com/juanfont/headscale/pull/1611)
  - The latest supported client is 1.38
- Headscale checks that _at least_ one DERP is defined at start [#1564](https://github.com/juanfont/headscale/pull/1564)
  - If no DERP is configured, the server will fail to start; this can be because it cannot load the DERPMap from file or URL.
- Embedded DERP server requires a private key [#1611](https://github.com/juanfont/headscale/pull/1611)
  - Add a filepath entry to [`derp.server.private_key_path`](https://github.com/juanfont/headscale/blob/b35993981297e18393706b2c963d6db882bba6aa/config-example.yaml#L95)
- Docker images are now built with goreleaser (ko) [#1716](https://github.com/juanfont/headscale/pull/1716) [#1763](https://github.com/juanfont/headscale/pull/1763)
  - The entrypoint of the container image has changed from a shell to the headscale binary, requiring a change from `headscale serve` to `serve`
  - `/var/lib/headscale` and `/var/run/headscale` are no longer created automatically, see [container docs](./docs/running-headscale-container.md)
- Prefixes are now defined per v4 and v6 range (see the config sketch after this list). [#1756](https://github.com/juanfont/headscale/pull/1756)
  - The `ip_prefixes` option is now `prefixes.v4` and `prefixes.v6`
  - `prefixes.allocation` can be set to `sequential` or `random` to control how IPs are assigned. [#1869](https://github.com/juanfont/headscale/pull/1869)
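
For reference, the configuration options named in the breaking changes above map onto `config.yaml` roughly as follows. This is a minimal sketch built only from the option names in this list; the example ranges and the key file path are placeholder assumptions, and `config-example.yaml` remains the authoritative reference:

```yaml
# Minimal sketch of the renamed/added options; values below are placeholders.
prefixes:
  v4: 100.64.0.0/10        # replaces the IPv4 entry of the old ip_prefixes list
  v6: fd7a:115c:a1e0::/48  # replaces the IPv6 entry of the old ip_prefixes list
  allocation: sequential   # or "random" (#1869)

derp:
  server:
    private_key_path: /var/lib/headscale/derp_server_private.key  # example path, now required for the embedded DERP server
```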

### Changes

- Add `.deb` and `.rpm` packages to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Use versioned migrations [#1644](https://github.com/juanfont/headscale/pull/1644)
- Make the OIDC callback page better [#1484](https://github.com/juanfont/headscale/pull/1484)
- SSH support [#1487](https://github.com/juanfont/headscale/pull/1487)
- State management has been improved [#1492](https://github.com/juanfont/headscale/pull/1492)
- Use error group handling to ensure tests actually pass [#1535](https://github.com/juanfont/headscale/pull/1535) based on [#1460](https://github.com/juanfont/headscale/pull/1460)
- Fix hang on SIGTERM [#1492](https://github.com/juanfont/headscale/pull/1492) taken from [#1480](https://github.com/juanfont/headscale/pull/1480)
- Send logs to stderr by default [#1524](https://github.com/juanfont/headscale/pull/1524)
- Fix [TS-2023-006](https://tailscale.com/security-bulletins/#ts-2023-006) security UPnP issue [#1563](https://github.com/juanfont/headscale/pull/1563)
- Turn off gRPC logging [#1640](https://github.com/juanfont/headscale/pull/1640) fixes [#1259](https://github.com/juanfont/headscale/issues/1259)
- Added the possibility to manually create a DERP-map entry that can be customized, instead of automatically creating it. [#1565](https://github.com/juanfont/headscale/pull/1565)
- Add support for deleting API keys [#1702](https://github.com/juanfont/headscale/pull/1702)
- Add command to backfill IP addresses for nodes missing IPs from configured prefixes. [#1869](https://github.com/juanfont/headscale/pull/1869)
- Log available update as warning [#1877](https://github.com/juanfont/headscale/pull/1877)

## 0.22.3 (2023-05-12)

### Changes

- Added missing ca-certificates in Docker image [#1463](https://github.com/juanfont/headscale/pull/1463)

## 0.22.2 (2023-05-10)

### Changes

- Add environment flags to enable pprof (profiling) [#1382](https://github.com/juanfont/headscale/pull/1382)
  - Profiles are continuously generated in our integration tests.
- Fix systemd service file location in `.deb` packages [#1391](https://github.com/juanfont/headscale/pull/1391)
- Improvements on Noise implementation [#1379](https://github.com/juanfont/headscale/pull/1379)
- Replace node filter logic, ensuring nodes with access can see each other [#1381](https://github.com/juanfont/headscale/pull/1381)
- Disable (or delete) both exit routes at the same time [#1428](https://github.com/juanfont/headscale/pull/1428)
- Ditch distroless for Docker image, create default socket dir in `/var/run/headscale` [#1450](https://github.com/juanfont/headscale/pull/1450)

## 0.22.1 (2023-04-20)

### Changes

- Fix issue where systemd could not bind to port 80 [#1365](https://github.com/juanfont/headscale/pull/1365)

## 0.22.0 (2023-04-20)

### Changes

- Add `.deb` packages to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Update and simplify the documentation to use new `.deb` packages [#1349](https://github.com/juanfont/headscale/pull/1349)
- Add 32-bit Arm platforms to release process [#1297](https://github.com/juanfont/headscale/pull/1297)
- Fix longstanding bug that would prevent "\*" from working properly in ACLs (issue [#699](https://github.com/juanfont/headscale/issues/699)) [#1279](https://github.com/juanfont/headscale/pull/1279)
- Fix issue where IPv6 could not be used in, or while using ACLs (part of [#809](https://github.com/juanfont/headscale/issues/809)) [#1339](https://github.com/juanfont/headscale/pull/1339)
- Target Go 1.20 and Tailscale 1.38 for Headscale [#1323](https://github.com/juanfont/headscale/pull/1323)

## 0.21.0 (2023-03-20)
23 Dockerfile
@@ -1,23 +0,0 @@
# Builder image
FROM docker.io/golang:1.20-bullseye AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale

COPY go.mod go.sum /go/src/headscale/
RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN strip /go/bin/headscale
RUN test -e /go/bin/headscale

# Production image
FROM gcr.io/distroless/base-debian11

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

EXPOSE 8080/tcp
CMD ["headscale"]
@@ -1,5 +1,8 @@
# Builder image
FROM docker.io/golang:1.20-bullseye AS build
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.

FROM docker.io/golang:1.22-bookworm AS build
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale
@@ -9,15 +12,21 @@ RUN go mod download

COPY . .

RUN CGO_ENABLED=0 GOOS=linux go install -tags ts2019 -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale
RUN test -e /go/bin/headscale

# Debug image
FROM docker.io/golang:1.20.0-bullseye
FROM docker.io/golang:1.22-bookworm

COPY --from=build /go/bin/headscale /bin/headscale
ENV TZ UTC

RUN apt-get update \
    && apt-get install --no-install-recommends --yes less jq \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean
RUN mkdir -p /var/run/headscale

# Need to reset the entrypoint or everything will run as a busybox script
ENTRYPOINT []
EXPOSE 8080/tcp
@@ -1,19 +0,0 @@
FROM ubuntu:latest

ARG TAILSCALE_VERSION=*
ARG TAILSCALE_CHANNEL=stable

RUN apt-get update \
    && apt-get install -y gnupg curl ssh \
    && curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.gpg | apt-key add - \
    && curl -fsSL https://pkgs.tailscale.com/${TAILSCALE_CHANNEL}/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
    && apt-get update \
    && apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
    && rm -rf /var/lib/apt/lists/*

RUN adduser --shell=/bin/bash ssh-it-user

ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt

RUN update-ca-certificates
@@ -1,7 +1,11 @@
# This Dockerfile and the images produced are for testing headscale,
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.

FROM golang:latest

RUN apt-get update \
    && apt-get install -y ca-certificates dnsutils git iptables ssh \
    && apt-get install -y dnsutils git iptables ssh ca-certificates \
    && rm -rf /var/lib/apt/lists/*

RUN useradd --shell=/bin/bash --create-home ssh-it-user
@@ -10,15 +14,8 @@ RUN git clone https://github.com/tailscale/tailscale.git

WORKDIR /go/tailscale

RUN git checkout main

RUN sh build_dist.sh tailscale.com/cmd/tailscale
RUN sh build_dist.sh tailscale.com/cmd/tailscaled

RUN cp tailscale /usr/local/bin/
RUN cp tailscaled /usr/local/bin/

ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
RUN chmod 644 /usr/local/share/ca-certificates/server.crt

RUN update-ca-certificates
RUN git checkout main \
    && sh build_dist.sh tailscale.com/cmd/tailscale \
    && sh build_dist.sh tailscale.com/cmd/tailscaled \
    && cp tailscale /usr/local/bin/ \
    && cp tailscaled /usr/local/bin/
45 Makefile
@@ -10,8 +10,6 @@ ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
else
endif

TAGS = -tags ts2019

# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)
@@ -24,31 +22,9 @@ build:
dev: lint test build

test:
	@go test $(TAGS) -short -coverprofile=coverage.out ./...
	gotestsum -- -short -coverprofile=coverage.out ./...

test_integration: test_integration_cli test_integration_derp test_integration_v2_general

test_integration_cli:
	docker network rm $$(docker network ls --filter name=headscale --quiet) || true
	docker network create headscale-test || true
	docker run -t --rm \
		--network headscale-test \
		-v ~/.cache/hs-integration-go:/go \
		-v $$PWD:$$PWD -w $$PWD \
		-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
		go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationCLI ./...

test_integration_derp:
	docker network rm $$(docker network ls --filter name=headscale --quiet) || true
	docker network create headscale-test || true
	docker run -t --rm \
		--network headscale-test \
		-v ~/.cache/hs-integration-go:/go \
		-v $$PWD:$$PWD -w $$PWD \
		-v /var/run/docker.sock:/var/run/docker.sock golang:1 \
		go test $(TAGS) -failfast -timeout 30m -count=1 -run IntegrationDERP ./...

test_integration_v2_general:
test_integration:
	docker run \
		-t --rm \
		-v ~/.cache/hs-integration-go:/go \
@@ -56,13 +32,7 @@ test_integration_v2_general:
		-v $$PWD:$$PWD -w $$PWD/integration \
		-v /var/run/docker.sock:/var/run/docker.sock \
		golang:1 \
		go test $(TAGS) -failfast ./... -timeout 120m -parallel 8

coverprofile_func:
	go tool cover -func=coverage.out

coverprofile_html:
	go tool cover -html=coverage.out
	go run gotest.tools/gotestsum@latest -- -failfast ./... -timeout 120m -parallel 8

lint:
	golangci-lint run --fix --timeout 10m
@@ -80,11 +50,4 @@ compress: build

generate:
	rm -rf gen
	go run github.com/bufbuild/buf/cmd/buf generate proto

install-protobuf-plugins:
	go install \
		github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway \
		github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2 \
		google.golang.org/protobuf/cmd/protoc-gen-go \
		google.golang.org/grpc/cmd/protoc-gen-go-grpc
	buf generate proto
780
acls.go
780
acls.go
@@ -1,780 +0,0 @@
package headscale

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/netip"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/rs/zerolog/log"
	"github.com/tailscale/hujson"
	"go4.org/netipx"
	"gopkg.in/yaml.v3"
	"tailscale.com/envknob"
	"tailscale.com/tailcfg"
)

const (
	errEmptyPolicy       = Error("empty policy")
	errInvalidAction     = Error("invalid action")
	errInvalidGroup      = Error("invalid group")
	errInvalidTag        = Error("invalid tag")
	errInvalidPortFormat = Error("invalid port format")
	errWildcardIsNeeded  = Error("wildcard as port is required for the protocol")
)

const (
	Base8              = 8
	Base10             = 10
	BitSize16          = 16
	BitSize32          = 32
	BitSize64          = 64
	portRangeBegin     = 0
	portRangeEnd       = 65535
	expectedTokenItems = 2
)

// For some reason golang.org/x/net/internal/iana is an internal package.
const (
	protocolICMP     = 1   // Internet Control Message
	protocolIGMP     = 2   // Internet Group Management
	protocolIPv4     = 4   // IPv4 encapsulation
	protocolTCP      = 6   // Transmission Control
	protocolEGP      = 8   // Exterior Gateway Protocol
	protocolIGP      = 9   // any private interior gateway (used by Cisco for their IGRP)
	protocolUDP      = 17  // User Datagram
	protocolGRE      = 47  // Generic Routing Encapsulation
	protocolESP      = 50  // Encap Security Payload
	protocolAH       = 51  // Authentication Header
	protocolIPv6ICMP = 58  // ICMP for IPv6
	protocolSCTP     = 132 // Stream Control Transmission Protocol
	ProtocolFC       = 133 // Fibre Channel
)

var featureEnableSSH = envknob.RegisterBool("HEADSCALE_EXPERIMENTAL_FEATURE_SSH")

// LoadACLPolicy loads the ACL policy from the specify path, and generates the ACL rules.
func (h *Headscale) LoadACLPolicy(path string) error {
	log.Debug().
		Str("func", "LoadACLPolicy").
		Str("path", path).
		Msg("Loading ACL policy from path")

	policyFile, err := os.Open(path)
	if err != nil {
		return err
	}
	defer policyFile.Close()

	var policy ACLPolicy
	policyBytes, err := io.ReadAll(policyFile)
	if err != nil {
		return err
	}

	switch filepath.Ext(path) {
	case ".yml", ".yaml":
		log.Debug().
			Str("path", path).
			Bytes("file", policyBytes).
			Msg("Loading ACLs from YAML")

		err := yaml.Unmarshal(policyBytes, &policy)
		if err != nil {
			return err
		}

		log.Trace().
			Interface("policy", policy).
			Msg("Loaded policy from YAML")

	default:
		ast, err := hujson.Parse(policyBytes)
		if err != nil {
			return err
		}

		ast.Standardize()
		policyBytes = ast.Pack()
		err = json.Unmarshal(policyBytes, &policy)
		if err != nil {
			return err
		}
	}

	if policy.IsZero() {
		return errEmptyPolicy
	}

	h.aclPolicy = &policy

	return h.UpdateACLRules()
}

func (h *Headscale) UpdateACLRules() error {
	machines, err := h.ListMachines()
	if err != nil {
		return err
	}

	if h.aclPolicy == nil {
		return errEmptyPolicy
	}

	rules, err := generateACLRules(machines, *h.aclPolicy, h.cfg.OIDC.StripEmaildomain)
	if err != nil {
		return err
	}
	log.Trace().Interface("ACL", rules).Msg("ACL rules generated")
	h.aclRules = rules

	// Precompute a map of which sources can reach each destination, this is
	// to provide quicker lookup when we calculate the peerlist for the map
	// response to nodes.
	aclPeerCacheMap := generateACLPeerCacheMap(rules)
	h.aclPeerCacheMapRW.Lock()
	h.aclPeerCacheMap = aclPeerCacheMap
	h.aclPeerCacheMapRW.Unlock()

	if featureEnableSSH() {
		sshRules, err := h.generateSSHRules()
		if err != nil {
			return err
		}
		log.Trace().Interface("SSH", sshRules).Msg("SSH rules generated")
		if h.sshPolicy == nil {
			h.sshPolicy = &tailcfg.SSHPolicy{}
		}
		h.sshPolicy.Rules = sshRules
	} else if h.aclPolicy != nil && len(h.aclPolicy.SSHs) > 0 {
		log.Info().Msg("SSH ACLs has been defined, but HEADSCALE_EXPERIMENTAL_FEATURE_SSH is not enabled, this is a unstable feature, check docs before activating")
	}

	return nil
}

// generateACLPeerCacheMap takes a list of Tailscale filter rules and generates a map
// of which Sources ("*" and IPs) can access destinations. This is to speed up the
// process of generating MapResponses when deciding which Peers to inform nodes about.
func generateACLPeerCacheMap(rules []tailcfg.FilterRule) map[string]map[string]struct{} {
	aclCachePeerMap := make(map[string]map[string]struct{})
	for _, rule := range rules {
		for _, srcIP := range rule.SrcIPs {
			for _, ip := range expandACLPeerAddr(srcIP) {
				if data, ok := aclCachePeerMap[ip]; ok {
					for _, dstPort := range rule.DstPorts {
						for _, dstIP := range expandACLPeerAddr(dstPort.IP) {
							data[dstIP] = struct{}{}
						}
					}
				} else {
					dstPortsMap := make(map[string]struct{}, len(rule.DstPorts))
					for _, dstPort := range rule.DstPorts {
						for _, dstIP := range expandACLPeerAddr(dstPort.IP) {
							dstPortsMap[dstIP] = struct{}{}
						}
					}
					aclCachePeerMap[ip] = dstPortsMap
				}
			}
		}
	}

	log.Trace().Interface("ACL Cache Map", aclCachePeerMap).Msg("ACL Peer Cache Map generated")

	return aclCachePeerMap
}

// expandACLPeerAddr takes a "tailcfg.FilterRule" "IP" and expands it into
// something our cache logic can look up, which is "*" or single IP addresses.
// This is probably quite inefficient, but it is a result of
// "make it work, then make it fast", and a lot of the ACL stuff does not
// work, but people have tried to make it fast.
func expandACLPeerAddr(srcIP string) []string {
	if ip, err := netip.ParseAddr(srcIP); err == nil {
		return []string{ip.String()}
	}

	if cidr, err := netip.ParsePrefix(srcIP); err == nil {
		addrs := []string{}

		ipRange := netipx.RangeOfPrefix(cidr)

		from := ipRange.From()
		too := ipRange.To()

		if from == too {
			return []string{from.String()}
		}

		for from != too && from.Less(too) {
			addrs = append(addrs, from.String())
			from = from.Next()
		}
		addrs = append(addrs, too.String()) // Add the last IP address in the range

		return addrs
	}

	// probably "*" or other string based "IP"
	return []string{srcIP}
}

func generateACLRules(
	machines []Machine,
	aclPolicy ACLPolicy,
	stripEmaildomain bool,
) ([]tailcfg.FilterRule, error) {
	rules := []tailcfg.FilterRule{}

	for index, acl := range aclPolicy.ACLs {
		if acl.Action != "accept" {
			return nil, errInvalidAction
		}

		srcIPs := []string{}
		for innerIndex, src := range acl.Sources {
			srcs, err := generateACLPolicySrc(machines, aclPolicy, src, stripEmaildomain)
			if err != nil {
				log.Error().
					Msgf("Error parsing ACL %d, Source %d", index, innerIndex)

				return nil, err
			}
			srcIPs = append(srcIPs, srcs...)
		}

		protocols, needsWildcard, err := parseProtocol(acl.Protocol)
		if err != nil {
			log.Error().
				Msgf("Error parsing ACL %d. protocol unknown %s", index, acl.Protocol)

			return nil, err
		}

		destPorts := []tailcfg.NetPortRange{}
		for innerIndex, dest := range acl.Destinations {
			dests, err := generateACLPolicyDest(
				machines,
				aclPolicy,
				dest,
				needsWildcard,
				stripEmaildomain,
			)
			if err != nil {
				log.Error().
					Msgf("Error parsing ACL %d, Destination %d", index, innerIndex)

				return nil, err
			}
			destPorts = append(destPorts, dests...)
		}

		rules = append(rules, tailcfg.FilterRule{
			SrcIPs:   srcIPs,
			DstPorts: destPorts,
			IPProto:  protocols,
		})
	}

	return rules, nil
}

func (h *Headscale) generateSSHRules() ([]*tailcfg.SSHRule, error) {
	rules := []*tailcfg.SSHRule{}

	if h.aclPolicy == nil {
		return nil, errEmptyPolicy
	}

	machines, err := h.ListMachines()
	if err != nil {
		return nil, err
	}

	acceptAction := tailcfg.SSHAction{
		Message:                  "",
		Reject:                   false,
		Accept:                   true,
		SessionDuration:          0,
		AllowAgentForwarding:     false,
		HoldAndDelegate:          "",
		AllowLocalPortForwarding: true,
	}

	rejectAction := tailcfg.SSHAction{
		Message:                  "",
		Reject:                   true,
		Accept:                   false,
		SessionDuration:          0,
		AllowAgentForwarding:     false,
		HoldAndDelegate:          "",
		AllowLocalPortForwarding: false,
	}

	for index, sshACL := range h.aclPolicy.SSHs {
		action := rejectAction
		switch sshACL.Action {
		case "accept":
			action = acceptAction
		case "check":
			checkAction, err := sshCheckAction(sshACL.CheckPeriod)
			if err != nil {
				log.Error().
					Msgf("Error parsing SSH %d, check action with unparsable duration '%s'", index, sshACL.CheckPeriod)
			} else {
				action = *checkAction
			}
		default:
			log.Error().
				Msgf("Error parsing SSH %d, unknown action '%s'", index, sshACL.Action)

			return nil, err
		}

		principals := make([]*tailcfg.SSHPrincipal, 0, len(sshACL.Sources))
		for innerIndex, rawSrc := range sshACL.Sources {
			expandedSrcs, err := expandAlias(
				machines,
				*h.aclPolicy,
				rawSrc,
				h.cfg.OIDC.StripEmaildomain,
			)
			if err != nil {
				log.Error().
					Msgf("Error parsing SSH %d, Source %d", index, innerIndex)

				return nil, err
			}
			for _, expandedSrc := range expandedSrcs {
				principals = append(principals, &tailcfg.SSHPrincipal{
					NodeIP: expandedSrc,
				})
			}
		}

		userMap := make(map[string]string, len(sshACL.Users))
		for _, user := range sshACL.Users {
			userMap[user] = "="
		}
		rules = append(rules, &tailcfg.SSHRule{
			RuleExpires: nil,
			Principals:  principals,
			SSHUsers:    userMap,
			Action:      &action,
		})
	}

	return rules, nil
}

func sshCheckAction(duration string) (*tailcfg.SSHAction, error) {
	sessionLength, err := time.ParseDuration(duration)
	if err != nil {
		return nil, err
	}

	return &tailcfg.SSHAction{
		Message:                  "",
		Reject:                   false,
		Accept:                   true,
		SessionDuration:          sessionLength,
		AllowAgentForwarding:     false,
		HoldAndDelegate:          "",
		AllowLocalPortForwarding: true,
	}, nil
}

func generateACLPolicySrc(
	machines []Machine,
	aclPolicy ACLPolicy,
	src string,
	stripEmaildomain bool,
) ([]string, error) {
	return expandAlias(machines, aclPolicy, src, stripEmaildomain)
}

func generateACLPolicyDest(
	machines []Machine,
	aclPolicy ACLPolicy,
	dest string,
	needsWildcard bool,
	stripEmaildomain bool,
) ([]tailcfg.NetPortRange, error) {
	tokens := strings.Split(dest, ":")
	if len(tokens) < expectedTokenItems || len(tokens) > 3 {
		return nil, errInvalidPortFormat
	}

	var alias string
	// We can have here stuff like:
	// git-server:*
	// 192.168.1.0/24:22
	// tag:montreal-webserver:80,443
	// tag:api-server:443
	// example-host-1:*
	if len(tokens) == expectedTokenItems {
		alias = tokens[0]
	} else {
		alias = fmt.Sprintf("%s:%s", tokens[0], tokens[1])
	}

	expanded, err := expandAlias(
		machines,
		aclPolicy,
		alias,
		stripEmaildomain,
	)
	if err != nil {
		return nil, err
	}
	ports, err := expandPorts(tokens[len(tokens)-1], needsWildcard)
	if err != nil {
		return nil, err
	}

	dests := []tailcfg.NetPortRange{}
	for _, d := range expanded {
		for _, p := range *ports {
			pr := tailcfg.NetPortRange{
				IP:    d,
				Ports: p,
			}
			dests = append(dests, pr)
		}
	}

	return dests, nil
}

// parseProtocol reads the proto field of the ACL and generates a list of
// protocols that will be allowed, following the IANA IP protocol number
// https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
//
// If the ACL proto field is empty, it allows ICMPv4, ICMPv6, TCP, and UDP,
// as per Tailscale behaviour (see tailcfg.FilterRule).
//
// Also returns a boolean indicating if the protocol
// requires all the destinations to use wildcard as port number (only TCP,
// UDP and SCTP support specifying ports).
func parseProtocol(protocol string) ([]int, bool, error) {
	switch protocol {
	case "":
		return nil, false, nil
	case "igmp":
		return []int{protocolIGMP}, true, nil
	case "ipv4", "ip-in-ip":
		return []int{protocolIPv4}, true, nil
	case "tcp":
		return []int{protocolTCP}, false, nil
	case "egp":
		return []int{protocolEGP}, true, nil
	case "igp":
		return []int{protocolIGP}, true, nil
	case "udp":
		return []int{protocolUDP}, false, nil
	case "gre":
		return []int{protocolGRE}, true, nil
	case "esp":
		return []int{protocolESP}, true, nil
	case "ah":
		return []int{protocolAH}, true, nil
	case "sctp":
		return []int{protocolSCTP}, false, nil
	case "icmp":
		return []int{protocolICMP, protocolIPv6ICMP}, true, nil

	default:
		protocolNumber, err := strconv.Atoi(protocol)
		if err != nil {
			return nil, false, err
		}
		needsWildcard := protocolNumber != protocolTCP &&
			protocolNumber != protocolUDP &&
			protocolNumber != protocolSCTP

		return []int{protocolNumber}, needsWildcard, nil
	}
}

// expandalias has an input of either
// - a user
// - a group
// - a tag
// - a host
// and transform these in IPAddresses.
func expandAlias(
	machines []Machine,
	aclPolicy ACLPolicy,
	alias string,
	stripEmailDomain bool,
) ([]string, error) {
	ips := []string{}
	if alias == "*" {
		return []string{"*"}, nil
	}

	log.Debug().
		Str("alias", alias).
		Msg("Expanding")

	if strings.HasPrefix(alias, "group:") {
		users, err := expandGroup(aclPolicy, alias, stripEmailDomain)
		if err != nil {
			return ips, err
		}
		for _, n := range users {
			nodes := filterMachinesByUser(machines, n)
			for _, node := range nodes {
				ips = append(ips, node.IPAddresses.ToStringSlice()...)
			}
		}

		return ips, nil
	}

	if strings.HasPrefix(alias, "tag:") {
		// check for forced tags
		for _, machine := range machines {
			if contains(machine.ForcedTags, alias) {
				ips = append(ips, machine.IPAddresses.ToStringSlice()...)
			}
		}

		// find tag owners
		owners, err := expandTagOwners(aclPolicy, alias, stripEmailDomain)
		if err != nil {
			if errors.Is(err, errInvalidTag) {
				if len(ips) == 0 {
					return ips, fmt.Errorf(
						"%w. %v isn't owned by a TagOwner and no forced tags are defined",
						errInvalidTag,
						alias,
					)
				}

				return ips, nil
			} else {
				return ips, err
			}
		}

		// filter out machines per tag owner
		for _, user := range owners {
			machines := filterMachinesByUser(machines, user)
			for _, machine := range machines {
				hi := machine.GetHostInfo()
				if contains(hi.RequestTags, alias) {
					ips = append(ips, machine.IPAddresses.ToStringSlice()...)
				}
			}
		}

		return ips, nil
	}

	// if alias is a user
	nodes := filterMachinesByUser(machines, alias)
	nodes = excludeCorrectlyTaggedNodes(aclPolicy, nodes, alias, stripEmailDomain)

	for _, n := range nodes {
		ips = append(ips, n.IPAddresses.ToStringSlice()...)
	}
	if len(ips) > 0 {
		return ips, nil
	}

	// if alias is an host
	if h, ok := aclPolicy.Hosts[alias]; ok {
		return []string{h.String()}, nil
	}

	// if alias is an IP
	ip, err := netip.ParseAddr(alias)
	if err == nil {
		return []string{ip.String()}, nil
	}

	// if alias is an CIDR
	cidr, err := netip.ParsePrefix(alias)
	if err == nil {
		return []string{cidr.String()}, nil
	}

	log.Warn().Msgf("No IPs found with the alias %v", alias)

	return ips, nil
}

// excludeCorrectlyTaggedNodes will remove from the list of input nodes the ones
// that are correctly tagged since they should not be listed as being in the user
// we assume in this function that we only have nodes from 1 user.
func excludeCorrectlyTaggedNodes(
	aclPolicy ACLPolicy,
	nodes []Machine,
	user string,
	stripEmailDomain bool,
) []Machine {
	out := []Machine{}
	tags := []string{}
	for tag := range aclPolicy.TagOwners {
		owners, _ := expandTagOwners(aclPolicy, user, stripEmailDomain)
		ns := append(owners, user)
		if contains(ns, user) {
			tags = append(tags, tag)
		}
	}
	// for each machine if tag is in tags list, don't append it.
	for _, machine := range nodes {
		hi := machine.GetHostInfo()

		found := false
		for _, t := range hi.RequestTags {
			if contains(tags, t) {
				found = true

				break
			}
		}
		if len(machine.ForcedTags) > 0 {
			found = true
		}
		if !found {
			out = append(out, machine)
		}
	}

	return out
}

func expandPorts(portsStr string, needsWildcard bool) (*[]tailcfg.PortRange, error) {
	if portsStr == "*" {
		return &[]tailcfg.PortRange{
			{First: portRangeBegin, Last: portRangeEnd},
		}, nil
	}

	if needsWildcard {
		return nil, errWildcardIsNeeded
	}

	ports := []tailcfg.PortRange{}
	for _, portStr := range strings.Split(portsStr, ",") {
		rang := strings.Split(portStr, "-")
		switch len(rang) {
		case 1:
			port, err := strconv.ParseUint(rang[0], Base10, BitSize16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{
				First: uint16(port),
				Last:  uint16(port),
			})

		case expectedTokenItems:
			start, err := strconv.ParseUint(rang[0], Base10, BitSize16)
			if err != nil {
				return nil, err
			}
			last, err := strconv.ParseUint(rang[1], Base10, BitSize16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{
				First: uint16(start),
				Last:  uint16(last),
			})

		default:
			return nil, errInvalidPortFormat
		}
	}

	return &ports, nil
}

func filterMachinesByUser(machines []Machine, user string) []Machine {
	out := []Machine{}
	for _, machine := range machines {
		if machine.User.Name == user {
			out = append(out, machine)
		}
	}

	return out
}

// expandTagOwners will return a list of user. An owner can be either a user or a group
// a group cannot be composed of groups.
func expandTagOwners(
	aclPolicy ACLPolicy,
	tag string,
	stripEmailDomain bool,
) ([]string, error) {
	var owners []string
	ows, ok := aclPolicy.TagOwners[tag]
	if !ok {
		return []string{}, fmt.Errorf(
			"%w. %v isn't owned by a TagOwner. Please add one first. https://tailscale.com/kb/1018/acls/#tag-owners",
			errInvalidTag,
			tag,
		)
	}
	for _, owner := range ows {
		if strings.HasPrefix(owner, "group:") {
			gs, err := expandGroup(aclPolicy, owner, stripEmailDomain)
			if err != nil {
				return []string{}, err
			}
			owners = append(owners, gs...)
		} else {
			owners = append(owners, owner)
		}
	}

	return owners, nil
}

// expandGroup will return the list of user inside the group
// after some validation.
func expandGroup(
	aclPolicy ACLPolicy,
	group string,
	stripEmailDomain bool,
) ([]string, error) {
	outGroups := []string{}
	aclGroups, ok := aclPolicy.Groups[group]
	if !ok {
		return []string{}, fmt.Errorf(
			"group %v isn't registered. %w",
			group,
			errInvalidGroup,
		)
	}
	for _, group := range aclGroups {
		if strings.HasPrefix(group, "group:") {
			return []string{}, fmt.Errorf(
				"%w. A group cannot be composed of groups. https://tailscale.com/kb/1018/acls/#groups",
				errInvalidGroup,
			)
		}
		grp, err := NormalizeToFQDNRules(group, stripEmailDomain)
		if err != nil {
			return []string{}, fmt.Errorf(
				"failed to normalize group %q, err: %w",
				group,
				errInvalidGroup,
			)
		}
		outGroups = append(outGroups, grp)
	}

	return outGroups, nil
}
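Note: the removed expandPorts/generateACLPolicyDest above turn a destination such as "tag:api-server:443" or "git-server:*" into tailcfg.NetPortRange entries. As a minimal standalone sketch of that port-expansion behaviour (not part of the repository, names and main() are illustrative only):

package main

import (
	"fmt"
	"strconv"
	"strings"

	"tailscale.com/tailcfg"
)

// expandPortsSketch mirrors the behaviour of the removed expandPorts:
// "*" becomes the full 0-65535 range, "80,443" becomes two single-port
// ranges, and "5000-5010" becomes one range.
func expandPortsSketch(portsStr string) ([]tailcfg.PortRange, error) {
	if portsStr == "*" {
		return []tailcfg.PortRange{{First: 0, Last: 65535}}, nil
	}

	var ports []tailcfg.PortRange
	for _, portStr := range strings.Split(portsStr, ",") {
		bounds := strings.Split(portStr, "-")
		switch len(bounds) {
		case 1:
			port, err := strconv.ParseUint(bounds[0], 10, 16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{First: uint16(port), Last: uint16(port)})
		case 2:
			first, err := strconv.ParseUint(bounds[0], 10, 16)
			if err != nil {
				return nil, err
			}
			last, err := strconv.ParseUint(bounds[1], 10, 16)
			if err != nil {
				return nil, err
			}
			ports = append(ports, tailcfg.PortRange{First: uint16(first), Last: uint16(last)})
		default:
			return nil, fmt.Errorf("invalid port format: %q", portStr)
		}
	}

	return ports, nil
}

func main() {
	for _, spec := range []string{"*", "80,443", "5000-5010"} {
		ranges, err := expandPortsSketch(spec)
		fmt.Println(spec, ranges, err)
	}
}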
acls_test.go (1693 changed lines)
File diff suppressed because it is too large.
api_common.go
@@ -1,113 +0,0 @@
package headscale

import (
	"time"

	"github.com/rs/zerolog/log"
	"tailscale.com/tailcfg"
)

func (h *Headscale) generateMapResponse(
	mapRequest tailcfg.MapRequest,
	machine *Machine,
) (*tailcfg.MapResponse, error) {
	log.Trace().
		Str("func", "generateMapResponse").
		Str("machine", mapRequest.Hostinfo.Hostname).
		Msg("Creating Map response")
	node, err := h.toNode(*machine, h.cfg.BaseDomain, h.cfg.DNSConfig)
	if err != nil {
		log.Error().
			Caller().
			Str("func", "generateMapResponse").
			Err(err).
			Msg("Cannot convert to node")

		return nil, err
	}

	peers, err := h.getValidPeers(machine)
	if err != nil {
		log.Error().
			Caller().
			Str("func", "generateMapResponse").
			Err(err).
			Msg("Cannot fetch peers")

		return nil, err
	}

	profiles := h.getMapResponseUserProfiles(*machine, peers)

	nodePeers, err := h.toNodes(peers, h.cfg.BaseDomain, h.cfg.DNSConfig)
	if err != nil {
		log.Error().
			Caller().
			Str("func", "generateMapResponse").
			Err(err).
			Msg("Failed to convert peers to Tailscale nodes")

		return nil, err
	}

	dnsConfig := getMapResponseDNSConfig(
		h.cfg.DNSConfig,
		h.cfg.BaseDomain,
		*machine,
		peers,
	)

	now := time.Now()

	resp := tailcfg.MapResponse{
		KeepAlive: false,
		Node:      node,

		// TODO: Only send if updated
		DERPMap: h.DERPMap,

		// TODO: Only send if updated
		Peers: nodePeers,

		// TODO(kradalby): Implement:
		// https://github.com/tailscale/tailscale/blob/main/tailcfg/tailcfg.go#L1351-L1374
		// PeersChanged
		// PeersRemoved
		// PeersChangedPatch
		// PeerSeenChange
		// OnlineChange

		// TODO: Only send if updated
		DNSConfig: dnsConfig,

		// TODO: Only send if updated
		Domain: h.cfg.BaseDomain,

		// Do not instruct clients to collect services, we do not
		// support or do anything with them
		CollectServices: "false",

		// TODO: Only send if updated
		PacketFilter: h.aclRules,

		UserProfiles: profiles,

		// TODO: Only send if updated
		SSHPolicy: h.sshPolicy,

		ControlTime: &now,

		Debug: &tailcfg.Debug{
			DisableLogTail:      !h.cfg.LogTail.Enabled,
			RandomizeClientPort: h.cfg.RandomizeClientPort,
		},
	}

	log.Trace().
		Str("func", "generateMapResponse").
		Str("machine", mapRequest.Hostinfo.Hostname).
		// Interface("payload", resp).
		Msgf("Generated map response: %s", tailMapResponseToString(resp))

	return &resp, nil
}
api_key.go
@@ -1,157 +0,0 @@
package headscale

import (
	"fmt"
	"strings"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"golang.org/x/crypto/bcrypt"
	"google.golang.org/protobuf/types/known/timestamppb"
)

const (
	apiPrefixLength = 7
	apiKeyLength    = 32

	ErrAPIKeyFailedToParse = Error("Failed to parse ApiKey")
)

// APIKey describes the datamodel for API keys used to remotely authenticate with
// headscale.
type APIKey struct {
	ID     uint64 `gorm:"primary_key"`
	Prefix string `gorm:"uniqueIndex"`
	Hash   []byte

	CreatedAt  *time.Time
	Expiration *time.Time
	LastSeen   *time.Time
}

// CreateAPIKey creates a new ApiKey in a user, and returns it.
func (h *Headscale) CreateAPIKey(
	expiration *time.Time,
) (string, *APIKey, error) {
	prefix, err := GenerateRandomStringURLSafe(apiPrefixLength)
	if err != nil {
		return "", nil, err
	}

	toBeHashed, err := GenerateRandomStringURLSafe(apiKeyLength)
	if err != nil {
		return "", nil, err
	}

	// Key to return to user, this will only be visible _once_
	keyStr := prefix + "." + toBeHashed

	hash, err := bcrypt.GenerateFromPassword([]byte(toBeHashed), bcrypt.DefaultCost)
	if err != nil {
		return "", nil, err
	}

	key := APIKey{
		Prefix:     prefix,
		Hash:       hash,
		Expiration: expiration,
	}

	if err := h.db.Save(&key).Error; err != nil {
		return "", nil, fmt.Errorf("failed to save API key to database: %w", err)
	}

	return keyStr, &key, nil
}

// ListAPIKeys returns the list of ApiKeys for a user.
func (h *Headscale) ListAPIKeys() ([]APIKey, error) {
	keys := []APIKey{}
	if err := h.db.Find(&keys).Error; err != nil {
		return nil, err
	}

	return keys, nil
}

// GetAPIKey returns a ApiKey for a given key.
func (h *Headscale) GetAPIKey(prefix string) (*APIKey, error) {
	key := APIKey{}
	if result := h.db.First(&key, "prefix = ?", prefix); result.Error != nil {
		return nil, result.Error
	}

	return &key, nil
}

// GetAPIKeyByID returns a ApiKey for a given id.
func (h *Headscale) GetAPIKeyByID(id uint64) (*APIKey, error) {
	key := APIKey{}
	if result := h.db.Find(&APIKey{ID: id}).First(&key); result.Error != nil {
		return nil, result.Error
	}

	return &key, nil
}

// DestroyAPIKey destroys a ApiKey. Returns error if the ApiKey
// does not exist.
func (h *Headscale) DestroyAPIKey(key APIKey) error {
	if result := h.db.Unscoped().Delete(key); result.Error != nil {
		return result.Error
	}

	return nil
}

// ExpireAPIKey marks a ApiKey as expired.
func (h *Headscale) ExpireAPIKey(key *APIKey) error {
	if err := h.db.Model(&key).Update("Expiration", time.Now()).Error; err != nil {
		return err
	}

	return nil
}

func (h *Headscale) ValidateAPIKey(keyStr string) (bool, error) {
	prefix, hash, found := strings.Cut(keyStr, ".")
	if !found {
		return false, ErrAPIKeyFailedToParse
	}

	key, err := h.GetAPIKey(prefix)
	if err != nil {
		return false, fmt.Errorf("failed to validate api key: %w", err)
	}

	if key.Expiration.Before(time.Now()) {
		return false, nil
	}

	if err := bcrypt.CompareHashAndPassword(key.Hash, []byte(hash)); err != nil {
		return false, err
	}

	return true, nil
}

func (key *APIKey) toProto() *v1.ApiKey {
	protoKey := v1.ApiKey{
		Id:     key.ID,
		Prefix: key.Prefix,
	}

	if key.Expiration != nil {
		protoKey.Expiration = timestamppb.New(*key.Expiration)
	}

	if key.CreatedAt != nil {
		protoKey.CreatedAt = timestamppb.New(*key.CreatedAt)
	}

	if key.LastSeen != nil {
		protoKey.LastSeen = timestamppb.New(*key.LastSeen)
	}

	return &protoKey
}
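Note: the removed CreateAPIKey/ValidateAPIKey pair hands the caller "prefix.secret" exactly once and stores only the prefix plus a bcrypt hash of the secret. A minimal standalone sketch of that scheme (hypothetical helper names, not the repository's code):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/crypto/bcrypt"
)

// makeKey mimics the removed CreateAPIKey: the caller keeps "prefix.secret",
// the server keeps only the prefix and a bcrypt hash of the secret.
func makeKey(prefix, secret string) (string, []byte, error) {
	hash, err := bcrypt.GenerateFromPassword([]byte(secret), bcrypt.DefaultCost)
	if err != nil {
		return "", nil, err
	}

	return prefix + "." + secret, hash, nil
}

// checkKey mimics the removed ValidateAPIKey: split on the first ".",
// then compare the secret against the stored hash with bcrypt.
func checkKey(keyStr string, storedHash []byte) bool {
	_, secret, found := strings.Cut(keyStr, ".")
	if !found {
		return false
	}

	return bcrypt.CompareHashAndPassword(storedHash, []byte(secret)) == nil
}

func main() {
	keyStr, hash, err := makeKey("abc1234", "not-a-real-secret")
	if err != nil {
		panic(err)
	}
	fmt.Println(keyStr, checkKey(keyStr, hash)) // prints the key and "true"
}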
@@ -6,98 +6,10 @@ import (
	"bytes"
	"fmt"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"
	"text/template"
)

var (
	githubWorkflowPath  = "../../.github/workflows/"
	jobFileNameTemplate = `test-integration-v2-%s.yaml`
	jobTemplate         = template.Must(
		template.New("jobTemplate").
			Parse(`# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/

name: Integration Test v2 - {{.Name}}

on: [pull_request]

concurrency:
  group: {{ "${{ github.workflow }}-$${{ github.head_ref || github.run_id }}" }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v34
        with:
          files: |
            *.nix
            go.*
            **/*.go
            integration_test/
            config-example.yaml

      - uses: cachix/install-nix-action@v18
        if: {{ "${{ env.ACT }}" }} || steps.changed-files.outputs.any_changed == 'true'

      - name: Run general integration tests
        if: steps.changed-files.outputs.any_changed == 'true'
        run: |
          nix develop --command -- docker run \
            --tty --rm \
            --volume ~/.cache/hs-integration-go:/go \
            --name headscale-test-suite \
            --volume $PWD:$PWD -w $PWD/integration \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume $PWD/control_logs:/tmp/control \
            golang:1 \
            go test ./... \
            -tags ts2019 \
            -failfast \
            -timeout 120m \
            -parallel 1 \
            -run "^{{.Name}}$"

      - uses: actions/upload-artifact@v3
        if: always() && steps.changed-files.outputs.any_changed == 'true'
        with:
          name: logs
          path: "control_logs/*.log"
`),
	)
)

const workflowFilePerm = 0o600

func removeTests() {
	glob := fmt.Sprintf(jobFileNameTemplate, "*")

	files, err := filepath.Glob(filepath.Join(githubWorkflowPath, glob))
	if err != nil {
		log.Fatalf("failed to find test files")
	}

	for _, file := range files {
		err := os.Remove(file)
		if err != nil {
			log.Printf("failed to remove: %s", err)
		}
	}
}

func findTests() []string {
	rgBin, err := exec.LookPath("rg")
	if err != nil {
@@ -114,50 +26,44 @@ func findTests() []string {
		"--no-heading",
	}

	log.Printf("executing: %s %s", rgBin, strings.Join(args, " "))

	ripgrep := exec.Command(
		rgBin,
		args...,
	)

	result, err := ripgrep.CombinedOutput()
	cmd := exec.Command(rgBin, args...)
	var out bytes.Buffer
	cmd.Stdout = &out
	err = cmd.Run()
	if err != nil {
		log.Printf("out: %s", result)
		log.Fatalf("failed to run ripgrep: %s", err)
		log.Fatalf("failed to run command: %s", err)
	}

	tests := strings.Split(string(result), "\n")
	tests = tests[:len(tests)-1]

	tests := strings.Split(strings.TrimSpace(out.String()), "\n")
	return tests
}

func main() {
	type testConfig struct {
		Name string
func updateYAML(tests []string) {
	testsForYq := fmt.Sprintf("[%s]", strings.Join(tests, ", "))

	yqCommand := fmt.Sprintf(
		"yq eval '.jobs.integration-test.strategy.matrix.test = %s' ../../.github/workflows/test-integration.yaml -i",
		testsForYq,
	)
	cmd := exec.Command("bash", "-c", yqCommand)

	var out bytes.Buffer
	cmd.Stdout = &out
	err := cmd.Run()
	if err != nil {
		log.Fatalf("failed to run yq command: %s", err)
	}

	fmt.Println("YAML file updated successfully")
}

func main() {
	tests := findTests()

	removeTests()

	for _, test := range tests {
		log.Printf("generating workflow for %s", test)

		var content bytes.Buffer

		if err := jobTemplate.Execute(&content, testConfig{
			Name: test,
		}); err != nil {
			log.Fatalf("failed to render template: %s", err)
		}

		testPath := path.Join(githubWorkflowPath, fmt.Sprintf(jobFileNameTemplate, test))

		err := os.WriteFile(testPath, content.Bytes(), workflowFilePerm)
		if err != nil {
			log.Fatalf("failed to write github job: %s", err)
		}
	quotedTests := make([]string, len(tests))
	for i, test := range tests {
		quotedTests[i] = fmt.Sprintf("\"%s\"", test)
	}

	updateYAML(quotedTests)
}
@@ -5,8 +5,8 @@ import (
	"strconv"
	"time"

	"github.com/juanfont/headscale"
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/prometheus/common/model"
	"github.com/pterm/pterm"
	"github.com/rs/zerolog/log"
@@ -29,11 +29,16 @@ func init() {
	apiKeysCmd.AddCommand(createAPIKeyCmd)

	expireAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
	err := expireAPIKeyCmd.MarkFlagRequired("prefix")
	if err != nil {
	if err := expireAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
		log.Fatal().Err(err).Msg("")
	}
	apiKeysCmd.AddCommand(expireAPIKeyCmd)

	deleteAPIKeyCmd.Flags().StringP("prefix", "p", "", "ApiKey prefix")
	if err := deleteAPIKeyCmd.MarkFlagRequired("prefix"); err != nil {
		log.Fatal().Err(err).Msg("")
	}
	apiKeysCmd.AddCommand(deleteAPIKeyCmd)
}

var apiKeysCmd = &cobra.Command{
@@ -67,7 +72,7 @@ var listAPIKeys = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.ApiKeys, "", output)
			SuccessOutput(response.GetApiKeys(), "", output)

			return
		}
@@ -75,15 +80,15 @@ var listAPIKeys = &cobra.Command{
		tableData := pterm.TableData{
			{"ID", "Prefix", "Expiration", "Created"},
		}
		for _, key := range response.ApiKeys {
		for _, key := range response.GetApiKeys() {
			expiration := "-"

			if key.GetExpiration() != nil {
				expiration = ColourTime(key.Expiration.AsTime())
				expiration = ColourTime(key.GetExpiration().AsTime())
			}

			tableData = append(tableData, []string{
				strconv.FormatUint(key.GetId(), headscale.Base10),
				strconv.FormatUint(key.GetId(), util.Base10),
				key.GetPrefix(),
				expiration,
				key.GetCreatedAt().AsTime().Format(HeadscaleDateTimeFormat),
@@ -155,7 +160,7 @@ If you loose a key, create a new one and revoke (expire) the old one.`,
			return
		}

		SuccessOutput(response.ApiKey, response.ApiKey, output)
		SuccessOutput(response.GetApiKey(), response.GetApiKey(), output)
	},
}

@@ -199,3 +204,44 @@ var expireAPIKeyCmd = &cobra.Command{
		SuccessOutput(response, "Key expired", output)
	},
}

var deleteAPIKeyCmd = &cobra.Command{
	Use:     "delete",
	Short:   "Delete an ApiKey",
	Aliases: []string{"remove", "del"},
	Run: func(cmd *cobra.Command, args []string) {
		output, _ := cmd.Flags().GetString("output")

		prefix, err := cmd.Flags().GetString("prefix")
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf("Error getting prefix from CLI flag: %s", err),
				output,
			)

			return
		}

		ctx, client, conn, cancel := getHeadscaleCLIClient()
		defer cancel()
		defer conn.Close()

		request := &v1.DeleteApiKeyRequest{
			Prefix: prefix,
		}

		response, err := client.DeleteApiKey(ctx, request)
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf("Cannot delete Api Key: %s\n", err),
				output,
			)

			return
		}

		SuccessOutput(response, "Key deleted", output)
	},
}
@@ -3,11 +3,11 @@ package cli
import (
	"fmt"

	"github.com/juanfont/headscale"
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"google.golang.org/grpc/status"
	"tailscale.com/types/key"
)

const (
@@ -57,7 +57,7 @@ var debugCmd = &cobra.Command{

var createNodeCmd = &cobra.Command{
	Use:   "create-node",
	Short: "Create a node (machine) that can be registered with `nodes register <>` command",
	Short: "Create a node that can be registered with `nodes register <>` command",
	Run: func(cmd *cobra.Command, args []string) {
		output, _ := cmd.Flags().GetString("output")

@@ -93,11 +93,13 @@ var createNodeCmd = &cobra.Command{

			return
		}
		if !headscale.NodePublicKeyRegex.Match([]byte(machineKey)) {
			err = errPreAuthKeyMalformed

		var mkey key.MachinePublic
		err = mkey.UnmarshalText([]byte(machineKey))
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf("Error: %s", err),
				fmt.Sprintf("Failed to parse machine key from flag: %s", err),
				output,
			)

@@ -115,24 +117,24 @@ var createNodeCmd = &cobra.Command{
			return
		}

		request := &v1.DebugCreateMachineRequest{
		request := &v1.DebugCreateNodeRequest{
			Key:    machineKey,
			Name:   name,
			User:   user,
			Routes: routes,
		}

		response, err := client.DebugCreateMachine(ctx, request)
		response, err := client.DebugCreateNode(ctx, request)
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf("Cannot create machine: %s", status.Convert(err).Message()),
				fmt.Sprintf("Cannot create node: %s", status.Convert(err).Message()),
				output,
			)

			return
		}

		SuccessOutput(response.Machine, "Machine created", output)
		SuccessOutput(response.GetNode(), "Node created", output)
	},
}
@@ -9,8 +9,8 @@ import (
	"time"

	survey "github.com/AlecAivazis/survey/v2"
	"github.com/juanfont/headscale"
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/pterm/pterm"
	"github.com/spf13/cobra"
	"google.golang.org/grpc/status"
@@ -97,6 +97,8 @@ func init() {
	tagCmd.Flags().
		StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
	nodeCmd.AddCommand(tagCmd)

	nodeCmd.AddCommand(backfillNodeIPsCmd)
}

var nodeCmd = &cobra.Command{
@@ -107,7 +109,7 @@ var nodeCmd = &cobra.Command{

var registerNodeCmd = &cobra.Command{
	Use:   "register",
	Short: "Registers a machine to your network",
	Short: "Registers a node to your network",
	Run: func(cmd *cobra.Command, args []string) {
		output, _ := cmd.Flags().GetString("output")
		user, err := cmd.Flags().GetString("user")
@@ -132,17 +134,17 @@ var registerNodeCmd = &cobra.Command{
			return
		}

		request := &v1.RegisterMachineRequest{
		request := &v1.RegisterNodeRequest{
			Key:  machineKey,
			User: user,
		}

		response, err := client.RegisterMachine(ctx, request)
		response, err := client.RegisterNode(ctx, request)
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf(
					"Cannot register machine: %s\n",
					"Cannot register node: %s\n",
					status.Convert(err).Message(),
				),
				output,
@@ -152,8 +154,8 @@ var registerNodeCmd = &cobra.Command{
		}

		SuccessOutput(
			response.Machine,
			fmt.Sprintf("Machine %s registered", response.Machine.GivenName), output)
			response.GetNode(),
			fmt.Sprintf("Node %s registered", response.GetNode().GetGivenName()), output)
	},
}

@@ -180,11 +182,11 @@ var listNodesCmd = &cobra.Command{
		defer cancel()
		defer conn.Close()

		request := &v1.ListMachinesRequest{
		request := &v1.ListNodesRequest{
			User: user,
		}

		response, err := client.ListMachines(ctx, request)
		response, err := client.ListNodes(ctx, request)
		if err != nil {
			ErrorOutput(
				err,
@@ -196,12 +198,12 @@ var listNodesCmd = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.Machines, "", output)
			SuccessOutput(response.GetNodes(), "", output)

			return
		}

		tableData, err := nodesToPtables(user, showTags, response.Machines)
		tableData, err := nodesToPtables(user, showTags, response.GetNodes())
		if err != nil {
			ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)

@@ -223,7 +225,7 @@ var listNodesCmd = &cobra.Command{

var expireNodeCmd = &cobra.Command{
	Use:     "expire",
	Short:   "Expire (log out) a machine in your network",
	Short:   "Expire (log out) a node in your network",
	Long:    "Expiring a node will keep the node in the database and force it to reauthenticate.",
	Aliases: []string{"logout", "exp", "e"},
	Run: func(cmd *cobra.Command, args []string) {
@@ -244,16 +246,16 @@ var expireNodeCmd = &cobra.Command{
		defer cancel()
		defer conn.Close()

		request := &v1.ExpireMachineRequest{
			MachineId: identifier,
		request := &v1.ExpireNodeRequest{
			NodeId: identifier,
		}

		response, err := client.ExpireMachine(ctx, request)
		response, err := client.ExpireNode(ctx, request)
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf(
					"Cannot expire machine: %s\n",
					"Cannot expire node: %s\n",
					status.Convert(err).Message(),
				),
				output,
@@ -262,13 +264,13 @@ var expireNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.Machine, "Machine expired", output)
		SuccessOutput(response.GetNode(), "Node expired", output)
	},
}

var renameNodeCmd = &cobra.Command{
	Use:   "rename NEW_NAME",
	Short: "Renames a machine in your network",
	Short: "Renames a node in your network",
	Run: func(cmd *cobra.Command, args []string) {
		output, _ := cmd.Flags().GetString("output")

@@ -291,17 +293,17 @@ var renameNodeCmd = &cobra.Command{
		if len(args) > 0 {
			newName = args[0]
		}
		request := &v1.RenameMachineRequest{
			MachineId: identifier,
			NewName:   newName,
		request := &v1.RenameNodeRequest{
			NodeId:  identifier,
			NewName: newName,
		}

		response, err := client.RenameMachine(ctx, request)
		response, err := client.RenameNode(ctx, request)
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf(
					"Cannot rename machine: %s\n",
					"Cannot rename node: %s\n",
					status.Convert(err).Message(),
				),
				output,
@@ -310,7 +312,7 @@ var renameNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.Machine, "Machine renamed", output)
		SuccessOutput(response.GetNode(), "Node renamed", output)
	},
}

@@ -336,11 +338,11 @@ var deleteNodeCmd = &cobra.Command{
		defer cancel()
		defer conn.Close()

		getRequest := &v1.GetMachineRequest{
			MachineId: identifier,
		getRequest := &v1.GetNodeRequest{
			NodeId: identifier,
		}

		getResponse, err := client.GetMachine(ctx, getRequest)
		getResponse, err := client.GetNode(ctx, getRequest)
		if err != nil {
			ErrorOutput(
				err,
@@ -354,8 +356,8 @@ var deleteNodeCmd = &cobra.Command{
			return
		}

		deleteRequest := &v1.DeleteMachineRequest{
			MachineId: identifier,
		deleteRequest := &v1.DeleteNodeRequest{
			NodeId: identifier,
		}

		confirm := false
@@ -364,7 +366,7 @@ var deleteNodeCmd = &cobra.Command{
			prompt := &survey.Confirm{
				Message: fmt.Sprintf(
					"Do you want to remove the node %s?",
					getResponse.GetMachine().Name,
					getResponse.GetNode().GetName(),
				),
			}
			err = survey.AskOne(prompt, &confirm)
@@ -374,7 +376,7 @@ var deleteNodeCmd = &cobra.Command{
		}

		if confirm || force {
			response, err := client.DeleteMachine(ctx, deleteRequest)
			response, err := client.DeleteNode(ctx, deleteRequest)
			if output != "" {
				SuccessOutput(response, "", output)

@@ -436,11 +438,11 @@ var moveNodeCmd = &cobra.Command{
		defer cancel()
		defer conn.Close()

		getRequest := &v1.GetMachineRequest{
			MachineId: identifier,
		getRequest := &v1.GetNodeRequest{
			NodeId: identifier,
		}

		_, err = client.GetMachine(ctx, getRequest)
		_, err = client.GetNode(ctx, getRequest)
		if err != nil {
			ErrorOutput(
				err,
@@ -454,12 +456,12 @@ var moveNodeCmd = &cobra.Command{
			return
		}

		moveRequest := &v1.MoveMachineRequest{
			MachineId: identifier,
			User:      user,
		moveRequest := &v1.MoveNodeRequest{
			NodeId: identifier,
			User:   user,
		}

		moveResponse, err := client.MoveMachine(ctx, moveRequest)
		moveResponse, err := client.MoveNode(ctx, moveRequest)
		if err != nil {
			ErrorOutput(
				err,
@@ -473,14 +475,65 @@ var moveNodeCmd = &cobra.Command{
			return
		}

		SuccessOutput(moveResponse.Machine, "Node moved to another user", output)
		SuccessOutput(moveResponse.GetNode(), "Node moved to another user", output)
	},
}

var backfillNodeIPsCmd = &cobra.Command{
	Use:   "backfillips",
	Short: "Backfill IPs missing from nodes",
	Long: `
Backfill IPs can be used to add/remove IPs from nodes
based on the current configuration of Headscale.

If there are nodes that does not have IPv4 or IPv6
even if prefixes for both are configured in the config,
this command can be used to assign IPs of the sort to
all nodes that are missing.

If you remove IPv4 or IPv6 prefixes from the config,
it can be run to remove the IPs that should no longer
be assigned to nodes.`,
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		output, _ := cmd.Flags().GetString("output")

		confirm := false
		prompt := &survey.Confirm{
			Message: "Are you sure that you want to assign/remove IPs to/from nodes?",
		}
		err = survey.AskOne(prompt, &confirm)
		if err != nil {
			return
		}
		if confirm {
			ctx, client, conn, cancel := getHeadscaleCLIClient()
			defer cancel()
			defer conn.Close()

			changes, err := client.BackfillNodeIPs(ctx, &v1.BackfillNodeIPsRequest{Confirmed: confirm})
			if err != nil {
				ErrorOutput(
					err,
					fmt.Sprintf(
						"Error backfilling IPs: %s",
						status.Convert(err).Message(),
					),
					output,
				)

				return
			}

			SuccessOutput(changes, "Node IPs backfilled successfully", output)
		}
	},
}

func nodesToPtables(
	currentUser string,
	showTags bool,
	machines []*v1.Machine,
	nodes []*v1.Node,
) (pterm.TableData, error) {
	tableHeader := []string{
		"ID",
@@ -493,7 +546,7 @@ func nodesToPtables(
		"Ephemeral",
		"Last seen",
		"Expiration",
		"Online",
		"Connected",
		"Expired",
	}
	if showTags {
@@ -505,23 +558,23 @@ func nodesToPtables(
	}
	tableData := pterm.TableData{tableHeader}

	for _, machine := range machines {
	for _, node := range nodes {
		var ephemeral bool
		if machine.PreAuthKey != nil && machine.PreAuthKey.Ephemeral {
		if node.GetPreAuthKey() != nil && node.GetPreAuthKey().GetEphemeral() {
			ephemeral = true
		}

		var lastSeen time.Time
		var lastSeenTime string
		if machine.LastSeen != nil {
			lastSeen = machine.LastSeen.AsTime()
		if node.GetLastSeen() != nil {
			lastSeen = node.GetLastSeen().AsTime()
			lastSeenTime = lastSeen.Format("2006-01-02 15:04:05")
		}

		var expiry time.Time
		var expiryTime string
		if machine.Expiry != nil {
			expiry = machine.Expiry.AsTime()
		if node.GetExpiry() != nil {
			expiry = node.GetExpiry().AsTime()
			expiryTime = expiry.Format("2006-01-02 15:04:05")
		} else {
			expiryTime = "N/A"
@@ -529,7 +582,7 @@ func nodesToPtables(

		var machineKey key.MachinePublic
		err := machineKey.UnmarshalText(
			[]byte(headscale.MachinePublicKeyEnsurePrefix(machine.MachineKey)),
			[]byte(node.GetMachineKey()),
		)
		if err != nil {
			machineKey = key.MachinePublic{}
@@ -537,14 +590,14 @@ func nodesToPtables(

		var nodeKey key.NodePublic
		err = nodeKey.UnmarshalText(
			[]byte(headscale.NodePublicKeyEnsurePrefix(machine.NodeKey)),
			[]byte(node.GetNodeKey()),
		)
		if err != nil {
			return nil, err
		}

		var online string
		if machine.Online {
		if node.GetOnline() {
			online = pterm.LightGreen("online")
		} else {
			online = pterm.LightRed("offline")
@@ -558,36 +611,36 @@ func nodesToPtables(
		}

		var forcedTags string
		for _, tag := range machine.ForcedTags {
		for _, tag := range node.GetForcedTags() {
			forcedTags += "," + tag
		}
		forcedTags = strings.TrimLeft(forcedTags, ",")
		var invalidTags string
		for _, tag := range machine.InvalidTags {
			if !contains(machine.ForcedTags, tag) {
		for _, tag := range node.GetInvalidTags() {
			if !contains(node.GetForcedTags(), tag) {
				invalidTags += "," + pterm.LightRed(tag)
			}
		}
		invalidTags = strings.TrimLeft(invalidTags, ",")
		var validTags string
		for _, tag := range machine.ValidTags {
			if !contains(machine.ForcedTags, tag) {
		for _, tag := range node.GetValidTags() {
			if !contains(node.GetForcedTags(), tag) {
				validTags += "," + pterm.LightGreen(tag)
			}
		}
		validTags = strings.TrimLeft(validTags, ",")

		var user string
		if currentUser == "" || (currentUser == machine.User.Name) {
			user = pterm.LightMagenta(machine.User.Name)
		if currentUser == "" || (currentUser == node.GetUser().GetName()) {
			user = pterm.LightMagenta(node.GetUser().GetName())
		} else {
			// Shared into this user
			user = pterm.LightYellow(machine.User.Name)
			user = pterm.LightYellow(node.GetUser().GetName())
		}

		var IPV4Address string
		var IPV6Address string
		for _, addr := range machine.IpAddresses {
		for _, addr := range node.GetIpAddresses() {
			if netip.MustParseAddr(addr).Is4() {
				IPV4Address = addr
			} else {
@@ -596,9 +649,9 @@ func nodesToPtables(
		}

		nodeData := []string{
			strconv.FormatUint(machine.Id, headscale.Base10),
			machine.Name,
			machine.GetGivenName(),
			strconv.FormatUint(node.GetId(), util.Base10),
			node.GetName(),
			node.GetGivenName(),
			machineKey.ShortString(),
			nodeKey.ShortString(),
			user,
@@ -646,17 +699,17 @@ var tagCmd = &cobra.Command{
		if err != nil {
			ErrorOutput(
				err,
				fmt.Sprintf("Error retrieving list of tags to add to machine, %v", err),
				fmt.Sprintf("Error retrieving list of tags to add to node, %v", err),
				output,
			)

			return
		}

		// Sending tags to machine
		// Sending tags to node
		request := &v1.SetTagsRequest{
			MachineId: identifier,
			Tags:      tagsToSet,
			NodeId: identifier,
			Tags:   tagsToSet,
		}
		resp, err := client.SetTags(ctx, request)
		if err != nil {
@@ -671,8 +724,8 @@ var tagCmd = &cobra.Command{

		if resp != nil {
			SuccessOutput(
				resp.GetMachine(),
				"Machine updated",
				resp.GetNode(),
				"Node updated",
				output,
			)
		}
@@ -84,7 +84,7 @@ var listPreAuthKeys = &cobra.Command{
		}

		if output != "" {
			SuccessOutput(response.PreAuthKeys, "", output)
			SuccessOutput(response.GetPreAuthKeys(), "", output)

			return
		}
@@ -101,22 +101,15 @@ var listPreAuthKeys = &cobra.Command{
				"Tags",
			},
		}
		for _, key := range response.PreAuthKeys {
		for _, key := range response.GetPreAuthKeys() {
			expiration := "-"
			if key.GetExpiration() != nil {
				expiration = ColourTime(key.Expiration.AsTime())
			}

			var reusable string
			if key.GetEphemeral() {
				reusable = "N/A"
			} else {
				reusable = fmt.Sprintf("%v", key.GetReusable())
				expiration = ColourTime(key.GetExpiration().AsTime())
			}

			aclTags := ""

			for _, tag := range key.AclTags {
			for _, tag := range key.GetAclTags() {
				aclTags += "," + tag
			}

@@ -125,7 +118,7 @@ var listPreAuthKeys = &cobra.Command{
			tableData = append(tableData, []string{
				key.GetId(),
				key.GetKey(),
				reusable,
				strconv.FormatBool(key.GetReusable()),
				strconv.FormatBool(key.GetEphemeral()),
				strconv.FormatBool(key.GetUsed()),
				expiration,
@@ -214,7 +207,7 @@ var createPreAuthKeyCmd = &cobra.Command{
			return
		}

		SuccessOutput(response.PreAuthKey, response.PreAuthKey.Key, output)
		SuccessOutput(response.GetPreAuthKey(), response.GetPreAuthKey().GetKey(), output)
	},
}

@@ -5,7 +5,7 @@ import (
	"os"
	"runtime"

	"github.com/juanfont/headscale"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
@@ -38,33 +38,33 @@ func initConfig() {
		cfgFile = os.Getenv("HEADSCALE_CONFIG")
	}
	if cfgFile != "" {
		err := headscale.LoadConfig(cfgFile, true)
		err := types.LoadConfig(cfgFile, true)
		if err != nil {
			log.Fatal().Caller().Err(err).Msgf("Error loading config file %s", cfgFile)
		}
	} else {
		err := headscale.LoadConfig("", false)
		err := types.LoadConfig("", false)
		if err != nil {
			log.Fatal().Caller().Err(err).Msgf("Error loading config")
		}
	}

	cfg, err := headscale.GetHeadscaleConfig()
	cfg, err := types.GetHeadscaleConfig()
	if err != nil {
		log.Fatal().Caller().Err(err)
		log.Fatal().Caller().Err(err).Msg("Failed to get headscale configuration")
	}

	machineOutput := HasMachineOutputFlag()

	zerolog.SetGlobalLevel(cfg.Log.Level)

	// If the user has requested a "machine" readable format,
	// If the user has requested a "node" readable format,
	// then disable login so the output remains valid.
	if machineOutput {
		zerolog.SetGlobalLevel(zerolog.Disabled)
	}

	if cfg.Log.Format == headscale.JSONLogFormat {
	if cfg.Log.Format == types.JSONLogFormat {
		log.Logger = log.Output(os.Stdout)
	}

@@ -78,7 +78,7 @@ func initConfig() {
	res, err := latest.Check(githubTag, Version)
	if err == nil && res.Outdated {
		//nolint
		fmt.Printf(
		log.Warn().Msgf(
			"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
			res.Current,
			Version,
@@ -6,8 +6,8 @@ import (
	"net/netip"
	"strconv"

	"github.com/juanfont/headscale"
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/pterm/pterm"
	"github.com/spf13/cobra"
	"google.golang.org/grpc/status"
@@ -87,20 +87,20 @@ var listRoutesCmd = &cobra.Command{
			}

			if output != "" {
				SuccessOutput(response.Routes, "", output)
				SuccessOutput(response.GetRoutes(), "", output)

				return
			}

			routes = response.Routes
			routes = response.GetRoutes()
		} else {
			response, err := client.GetMachineRoutes(ctx, &v1.GetMachineRoutesRequest{
				MachineId: machineID,
			response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
				NodeId: machineID,
			})
			if err != nil {
				ErrorOutput(
					err,
					fmt.Sprintf("Cannot get routes for machine %d: %s", machineID, status.Convert(err).Message()),
					fmt.Sprintf("Cannot get routes for node %d: %s", machineID, status.Convert(err).Message()),
					output,
				)

@@ -108,12 +108,12 @@ var listRoutesCmd = &cobra.Command{
			}

			if output != "" {
				SuccessOutput(response.Routes, "", output)
				SuccessOutput(response.GetRoutes(), "", output)

				return
			}

			routes = response.Routes
			routes = response.GetRoutes()
		}

		tableData := routesToPtables(routes)
@@ -267,29 +267,29 @@ var deleteRouteCmd = &cobra.Command{

// routesToPtables converts the list of routes to a nice table.
func routesToPtables(routes []*v1.Route) pterm.TableData {
	tableData := pterm.TableData{{"ID", "Machine", "Prefix", "Advertised", "Enabled", "Primary"}}
	tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}}

	for _, route := range routes {
		var isPrimaryStr string
		prefix, err := netip.ParsePrefix(route.Prefix)
		prefix, err := netip.ParsePrefix(route.GetPrefix())
		if err != nil {
			log.Printf("Error parsing prefix %s: %s", route.Prefix, err)
			log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)

			continue
		}
		if prefix == headscale.ExitRouteV4 || prefix == headscale.ExitRouteV6 {
		if prefix == types.ExitRouteV4 || prefix == types.ExitRouteV6 {
			isPrimaryStr = "-"
		} else {
			isPrimaryStr = strconv.FormatBool(route.IsPrimary)
			isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
		}

		tableData = append(tableData,
			[]string{
				strconv.FormatUint(route.Id, Base10),
				route.Machine.GivenName,
				route.Prefix,
				strconv.FormatBool(route.Advertised),
				strconv.FormatBool(route.Enabled),
				strconv.FormatUint(route.GetId(), Base10),
				route.GetNode().GetGivenName(),
				route.GetPrefix(),
				strconv.FormatBool(route.GetAdvertised()),
				strconv.FormatBool(route.GetEnabled()),
				isPrimaryStr,
			})
	}
@@ -1,10 +1,10 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
survey "github.com/AlecAivazis/survey/v2"
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/pterm/pterm"
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -20,9 +20,7 @@ func init() {
|
||||
userCmd.AddCommand(renameUserCmd)
|
||||
}
|
||||
|
||||
const (
|
||||
errMissingParameter = headscale.Error("missing parameters")
|
||||
)
|
||||
var errMissingParameter = errors.New("missing parameters")
|
||||
|
||||
var userCmd = &cobra.Command{
|
||||
Use: "users",
|
||||
@@ -69,7 +67,7 @@ var createUserCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.User, "User created", output)
|
||||
SuccessOutput(response.GetUser(), "User created", output)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -171,7 +169,7 @@ var listUsersCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if output != "" {
|
||||
SuccessOutput(response.Users, "", output)
|
||||
SuccessOutput(response.GetUsers(), "", output)
|
||||
|
||||
return
|
||||
}
|
||||
@@ -238,6 +236,6 @@ var renameUserCmd = &cobra.Command{
|
||||
return
|
||||
}
|
||||
|
||||
SuccessOutput(response.User, "User renamed", output)
|
||||
SuccessOutput(response.GetUser(), "User renamed", output)
|
||||
},
|
||||
}
|
||||
|
@@ -8,8 +8,11 @@ import (
|
||||
"os"
|
||||
"reflect"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/juanfont/headscale/hscontrol"
|
||||
"github.com/juanfont/headscale/hscontrol/policy"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/rs/zerolog/log"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
@@ -22,8 +25,8 @@ const (
|
||||
SocketWritePermissions = 0o666
|
||||
)
|
||||
|
||||
func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
func getHeadscaleApp() (*hscontrol.Headscale, error) {
|
||||
cfg, err := types.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to load configuration while creating headscale instance: %w",
|
||||
@@ -31,7 +34,7 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
)
|
||||
}
|
||||
|
||||
app, err := headscale.NewHeadscale(cfg)
|
||||
app, err := hscontrol.NewHeadscale(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -39,21 +42,23 @@ func getHeadscaleApp() (*headscale.Headscale, error) {
|
||||
// We are doing this here, as in the future could be cool to have it also hot-reload
|
||||
|
||||
if cfg.ACL.PolicyPath != "" {
|
||||
aclPath := headscale.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath)
|
||||
err = app.LoadACLPolicy(aclPath)
|
||||
aclPath := util.AbsolutePathFromConfigPath(cfg.ACL.PolicyPath)
|
||||
pol, err := policy.LoadACLPolicyFromPath(aclPath)
|
||||
if err != nil {
|
||||
log.Fatal().
|
||||
Str("path", aclPath).
|
||||
Err(err).
|
||||
Msg("Could not load the ACL policy")
|
||||
}
|
||||
|
||||
app.ACLPolicy = pol
|
||||
}
|
||||
|
||||
return app, nil
|
||||
}
|
||||
|
||||
func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.ClientConn, context.CancelFunc) {
|
||||
cfg, err := headscale.GetHeadscaleConfig()
|
||||
cfg, err := types.GetHeadscaleConfig()
|
||||
if err != nil {
|
||||
log.Fatal().
|
||||
Err(err).
|
||||
@@ -74,7 +79,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
|
||||
address := cfg.CLI.Address
|
||||
|
||||
// If the address is not set, we assume that we are on the server hosting headscale.
|
||||
// If the address is not set, we assume that we are on the server hosting hscontrol.
|
||||
if address == "" {
|
||||
log.Debug().
|
||||
Str("socket", cfg.UnixSocket).
|
||||
@@ -98,7 +103,7 @@ func getHeadscaleCLIClient() (context.Context, v1.HeadscaleServiceClient, *grpc.
|
||||
grpcOptions = append(
|
||||
grpcOptions,
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
grpc.WithContextDialer(headscale.GrpcSocketDialer),
|
||||
grpc.WithContextDialer(util.GrpcSocketDialer),
|
||||
)
|
||||
} else {
|
||||
// If we are not connecting to a local server, require an API key for authentication
|
||||
@@ -149,17 +154,17 @@ func SuccessOutput(result interface{}, override string, outputFormat string) {
|
||||
case "json":
|
||||
jsonBytes, err = json.MarshalIndent(result, "", "\t")
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
log.Fatal().Err(err).Msg("failed to marshal output")
|
||||
}
|
||||
case "json-line":
|
||||
jsonBytes, err = json.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
log.Fatal().Err(err).Msg("failed to marshal output")
|
||||
}
|
||||
case "yaml":
|
||||
jsonBytes, err = yaml.Marshal(result)
|
||||
if err != nil {
|
||||
log.Fatal().Err(err)
|
||||
log.Fatal().Err(err).Msg("failed to marshal output")
|
||||
}
|
||||
default:
|
||||
//nolint
|
||||
|
@@ -4,7 +4,7 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/efekarakus/termcolor"
|
||||
"github.com/jagottsicher/termcolor"
|
||||
"github.com/juanfont/headscale/cmd/headscale/cli"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -34,7 +34,7 @@ func main() {
|
||||
|
||||
zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
|
||||
log.Logger = log.Output(zerolog.ConsoleWriter{
|
||||
Out: os.Stdout,
|
||||
Out: os.Stderr,
|
||||
TimeFormat: time.RFC3339,
|
||||
NoColor: !colors,
|
||||
})
|
||||
|
@@ -7,7 +7,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/juanfont/headscale"
|
||||
"github.com/juanfont/headscale/hscontrol/types"
|
||||
"github.com/juanfont/headscale/hscontrol/util"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/check.v1"
|
||||
)
|
||||
@@ -50,21 +51,21 @@ func (*Suite) TestConfigFileLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = headscale.LoadConfig(cfgFile, true)
|
||||
err = types.LoadConfig(cfgFile, true)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
|
||||
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
c.Assert(
|
||||
headscale.GetFileMode("unix_socket_permission"),
|
||||
util.GetFileMode("unix_socket_permission"),
|
||||
check.Equals,
|
||||
fs.FileMode(0o770),
|
||||
)
|
||||
@@ -93,21 +94,21 @@ func (*Suite) TestConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
err = types.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
// Test that config file was interpreted correctly
|
||||
c.Assert(viper.GetString("server_url"), check.Equals, "http://127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("listen_addr"), check.Equals, "127.0.0.1:8080")
|
||||
c.Assert(viper.GetString("metrics_listen_addr"), check.Equals, "127.0.0.1:9090")
|
||||
c.Assert(viper.GetString("db_type"), check.Equals, "sqlite3")
|
||||
c.Assert(viper.GetString("db_path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("database.type"), check.Equals, "sqlite")
|
||||
c.Assert(viper.GetString("database.sqlite.path"), check.Equals, "/var/lib/headscale/db.sqlite")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_hostname"), check.Equals, "")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_listen"), check.Equals, ":http")
|
||||
c.Assert(viper.GetString("tls_letsencrypt_challenge_type"), check.Equals, "HTTP-01")
|
||||
c.Assert(viper.GetStringSlice("dns_config.nameservers")[0], check.Equals, "1.1.1.1")
|
||||
c.Assert(
|
||||
headscale.GetFileMode("unix_socket_permission"),
|
||||
util.GetFileMode("unix_socket_permission"),
|
||||
check.Equals,
|
||||
fs.FileMode(0o770),
|
||||
)
|
||||
@@ -137,10 +138,10 @@ func (*Suite) TestDNSConfigLoading(c *check.C) {
|
||||
}
|
||||
|
||||
// Load example config, it should load without validation errors
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
err = types.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig, baseDomain := headscale.GetDNSConfig()
|
||||
dnsConfig, baseDomain := types.GetDNSConfig()
|
||||
|
||||
c.Assert(dnsConfig.Nameservers[0].String(), check.Equals, "1.1.1.1")
|
||||
c.Assert(dnsConfig.Resolvers[0].Addr, check.Equals, "1.1.1.1")
|
||||
@@ -172,7 +173,7 @@ noise:
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
|
||||
// Check configuration validation errors (1)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
err = types.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.NotNil)
|
||||
// check.Matches can not handle multiline strings
|
||||
tmp := strings.ReplaceAll(err.Error(), "\n", "***")
|
||||
@@ -201,6 +202,6 @@ tls_letsencrypt_hostname: example.com
|
||||
tls_letsencrypt_challenge_type: TLS-ALPN-01
|
||||
`)
|
||||
writeConfig(c, tmpDir, configYaml)
|
||||
err = headscale.LoadConfig(tmpDir, false)
|
||||
err = types.LoadConfig(tmpDir, false)
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
@@ -40,32 +40,31 @@ grpc_listen_addr: 127.0.0.1:50443
|
||||
# are doing.
|
||||
grpc_allow_insecure: false
|
||||
|
||||
# Private key used to encrypt the traffic between headscale
|
||||
# and Tailscale clients.
|
||||
# The private key file will be autogenerated if it's missing.
|
||||
#
|
||||
private_key_path: /var/lib/headscale/private.key
|
||||
|
||||
# The Noise section includes specific configuration for the
|
||||
# TS2021 Noise protocol
|
||||
noise:
|
||||
# The Noise private key is used to encrypt the
|
||||
# traffic between headscale and Tailscale clients when
|
||||
# using the new Noise-based protocol. It must be different
|
||||
# from the legacy private key.
|
||||
# using the new Noise-based protocol.
|
||||
private_key_path: /var/lib/headscale/noise_private.key
|
||||
|
||||
# List of IP prefixes to allocate tailaddresses from.
|
||||
# Each prefix consists of either an IPv4 or IPv6 address,
|
||||
# and the associated prefix length, delimited by a slash.
|
||||
# While this looks like it can take arbitrary values, it
|
||||
# needs to be within IP ranges supported by the Tailscale
|
||||
# client.
|
||||
# It must be within IP ranges supported by the Tailscale
|
||||
# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
|
||||
# See below:
|
||||
# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71
|
||||
# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
# Any other range is NOT supported, and it will cause unexpected issues.
|
||||
prefixes:
|
||||
v6: fd7a:115c:a1e0::/48
|
||||
v4: 100.64.0.0/10
|
||||
|
||||
# Strategy used for allocation of IPs to nodes, available options:
|
||||
# - sequential (default): assigns the next free IP from the previous given IP.
|
||||
# - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
|
||||
allocation: sequential
|
||||
|
||||
# DERP is a relay system that Tailscale uses when a direct
|
||||
# connection cannot be established.
|
||||
@@ -94,6 +93,22 @@ derp:
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
stun_listen_addr: "0.0.0.0:3478"
|
||||
|
||||
# Private key used to encrypt the traffic between headscale DERP
|
||||
# and Tailscale clients.
|
||||
# The private key file will be autogenerated if it's missing.
|
||||
#
|
||||
private_key_path: /var/lib/headscale/derp_server_private.key
|
||||
|
||||
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
|
||||
# it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
|
||||
# If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
|
||||
automatically_add_embedded_derp_region: true
|
||||
|
||||
# For better connection stability (especially when using an Exit-Node and DNS is not working),
|
||||
# it is possible to optionally add the public IPv4 and IPv6 addresses to the Derp-Map using:
|
||||
ipv4: 1.2.3.4
|
||||
ipv6: 2001:db8::1
|
||||
|
||||
# List of externally available DERP maps encoded in JSON
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
@@ -128,24 +143,28 @@ ephemeral_node_inactivity_timeout: 30m
|
||||
# In case of doubts, do not touch the default 10s.
|
||||
node_update_check_interval: 10s
|
||||
|
||||
# SQLite config
|
||||
db_type: sqlite3
|
||||
database:
|
||||
type: sqlite
|
||||
|
||||
# For production:
|
||||
db_path: /var/lib/headscale/db.sqlite
|
||||
# SQLite config
|
||||
sqlite:
|
||||
path: /var/lib/headscale/db.sqlite
|
||||
|
||||
# # Postgres config
|
||||
# If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
|
||||
# db_type: postgres
|
||||
# db_host: localhost
|
||||
# db_port: 5432
|
||||
# db_name: headscale
|
||||
# db_user: foo
|
||||
# db_pass: bar
|
||||
# # Postgres config
|
||||
# postgres:
|
||||
# # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
|
||||
# host: localhost
|
||||
# port: 5432
|
||||
# name: headscale
|
||||
# user: foo
|
||||
# pass: bar
|
||||
# max_open_conns: 10
|
||||
# max_idle_conns: 10
|
||||
# conn_max_idle_time_secs: 3600
|
||||
|
||||
# If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
|
||||
# in the 'db_ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
|
||||
# db_ssl: false
|
||||
# # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
|
||||
# # in the 'ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
|
||||
# ssl: false
|
||||
|
||||
### TLS configuration
|
||||
#
|
||||
|
404
db.go
@@ -1,404 +0,0 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"github.com/rs/zerolog/log"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/logger"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
const (
|
||||
dbVersion = "1"
|
||||
|
||||
errValueNotFound = Error("not found")
|
||||
ErrCannotParsePrefix = Error("cannot parse prefix")
|
||||
)
|
||||
|
||||
// KV is a key-value store in a psql table. For future use...
|
||||
type KV struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (h *Headscale) initDB() error {
|
||||
db, err := h.openDB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.db = db
|
||||
|
||||
if h.dbType == Postgres {
|
||||
db.Exec(`create extension if not exists "uuid-ossp";`)
|
||||
}
|
||||
|
||||
_ = db.Migrator().RenameTable("namespaces", "users")
|
||||
|
||||
err = db.AutoMigrate(&User{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "namespace_id", "user_id")
|
||||
_ = db.Migrator().RenameColumn(&PreAuthKey{}, "namespace_id", "user_id")
|
||||
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "ip_address", "ip_addresses")
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "name", "hostname")
|
||||
|
||||
// GivenName is used as the primary source of DNS names, make sure
|
||||
// the field is populated and normalized if it was not when the
|
||||
// machine was registered.
|
||||
_ = db.Migrator().RenameColumn(&Machine{}, "nickname", "given_name")
|
||||
|
||||
// If the Machine table has a column for registered,
|
||||
// find all occurrences of "false" and drop them. Then
|
||||
// remove the column.
|
||||
if db.Migrator().HasColumn(&Machine{}, "registered") {
|
||||
log.Info().
|
||||
Msg(`Database has legacy "registered" column in machine, removing...`)
|
||||
|
||||
machines := Machines{}
|
||||
if err := h.db.Not("registered").Find(&machines).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for _, machine := range machines {
|
||||
log.Info().
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Deleting unregistered machine")
|
||||
if err := h.db.Delete(&Machine{}, machine.ID).Error; err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("machine", machine.Hostname).
|
||||
Str("machine_key", machine.MachineKey).
|
||||
Msg("Error deleting unregistered machine")
|
||||
}
|
||||
}
|
||||
|
||||
err := db.Migrator().DropColumn(&Machine{}, "registered")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error dropping registered column")
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Route{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "enabled_routes") {
|
||||
log.Info().Msgf("Database has legacy enabled_routes column in machine, migrating...")
|
||||
|
||||
type MachineAux struct {
|
||||
ID uint64
|
||||
EnabledRoutes IPPrefixes
|
||||
}
|
||||
|
||||
machinesAux := []MachineAux{}
|
||||
err := db.Table("machines").Select("id, enabled_routes").Scan(&machinesAux).Error
|
||||
if err != nil {
|
||||
log.Fatal().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
for _, machine := range machinesAux {
|
||||
for _, prefix := range machine.EnabledRoutes {
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Err(err).
|
||||
Str("enabled_route", prefix.String()).
|
||||
Msg("Error parsing enabled_route")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
err = db.Preload("Machine").
|
||||
Where("machine_id = ? AND prefix = ?", machine.ID, IPPrefix(prefix)).
|
||||
First(&Route{}).
|
||||
Error
|
||||
if err == nil {
|
||||
log.Info().
|
||||
Str("enabled_route", prefix.String()).
|
||||
Msg("Route already migrated to new table, skipping")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
route := Route{
|
||||
MachineID: machine.ID,
|
||||
Advertised: true,
|
||||
Enabled: true,
|
||||
Prefix: IPPrefix(prefix),
|
||||
}
|
||||
if err := h.db.Create(&route).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error creating route")
|
||||
} else {
|
||||
log.Info().
|
||||
Uint64("machine_id", route.MachineID).
|
||||
Str("prefix", prefix.String()).
|
||||
Msg("Route migrated")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = db.Migrator().DropColumn(&Machine{}, "enabled_routes")
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Error dropping enabled_routes column")
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&Machine{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if db.Migrator().HasColumn(&Machine{}, "given_name") {
|
||||
machines := Machines{}
|
||||
if err := h.db.Find(&machines).Error; err != nil {
|
||||
log.Error().Err(err).Msg("Error accessing db")
|
||||
}
|
||||
|
||||
for item, machine := range machines {
|
||||
if machine.GivenName == "" {
|
||||
normalizedHostname, err := NormalizeToFQDNRules(
|
||||
machine.Hostname,
|
||||
h.cfg.OIDC.StripEmaildomain,
|
||||
)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to normalize machine hostname in DB migration")
|
||||
}
|
||||
|
||||
err = h.RenameMachine(&machines[item], normalizedHostname)
|
||||
if err != nil {
|
||||
log.Error().
|
||||
Caller().
|
||||
Str("hostname", machine.Hostname).
|
||||
Err(err).
|
||||
Msg("Failed to save normalized machine name in DB migration")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&KV{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&PreAuthKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = db.AutoMigrate(&PreAuthKeyACLTag{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_ = db.Migrator().DropTable("shared_machines")
|
||||
|
||||
err = db.AutoMigrate(&APIKey{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = h.setValue("db_version", dbVersion)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *Headscale) openDB() (*gorm.DB, error) {
|
||||
var db *gorm.DB
|
||||
var err error
|
||||
|
||||
var log logger.Interface
|
||||
if h.dbDebug {
|
||||
log = logger.Default
|
||||
} else {
|
||||
log = logger.Default.LogMode(logger.Silent)
|
||||
}
|
||||
|
||||
switch h.dbType {
|
||||
case Sqlite:
|
||||
db, err = gorm.Open(
|
||||
sqlite.Open(h.dbString+"?_synchronous=1&_journal_mode=WAL"),
|
||||
&gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
},
|
||||
)
|
||||
|
||||
db.Exec("PRAGMA foreign_keys=ON")
|
||||
|
||||
// The pure Go SQLite library does not handle locking in
|
||||
// the same way as the C-based one and we can't use the gorm
|
||||
// connection pool as of 2022/02/23.
|
||||
sqlDB, _ := db.DB()
|
||||
sqlDB.SetMaxIdleConns(1)
|
||||
sqlDB.SetMaxOpenConns(1)
|
||||
sqlDB.SetConnMaxIdleTime(time.Hour)
|
||||
|
||||
case Postgres:
|
||||
db, err = gorm.Open(postgres.Open(h.dbString), &gorm.Config{
|
||||
DisableForeignKeyConstraintWhenMigrating: true,
|
||||
Logger: log,
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// getValue returns the value for the given key in KV.
|
||||
func (h *Headscale) getValue(key string) (string, error) {
|
||||
var row KV
|
||||
if result := h.db.First(&row, "key = ?", key); errors.Is(
|
||||
result.Error,
|
||||
gorm.ErrRecordNotFound,
|
||||
) {
|
||||
return "", errValueNotFound
|
||||
}
|
||||
|
||||
return row.Value, nil
|
||||
}
|
||||
|
||||
// setValue sets value for the given key in KV.
|
||||
func (h *Headscale) setValue(key string, value string) error {
|
||||
keyValue := KV{
|
||||
Key: key,
|
||||
Value: value,
|
||||
}
|
||||
|
||||
if _, err := h.getValue(key); err == nil {
|
||||
h.db.Model(&keyValue).Where("key = ?", key).Update("value", value)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := h.db.Create(keyValue).Error; err != nil {
|
||||
return fmt.Errorf("failed to create key value pair in the database: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Headscale) pingDB(ctx context.Context) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, time.Second)
|
||||
defer cancel()
|
||||
db, err := h.db.DB()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.PingContext(ctx)
|
||||
}
|
||||
|
||||
// This is a "wrapper" type around tailscales
|
||||
// Hostinfo to allow us to add database "serialization"
|
||||
// methods. This allows us to use typed values throughout
|
||||
// the code and not have to marshal/unmarshal and error
|
||||
// check all over the code.
|
||||
type HostInfo tailcfg.Hostinfo
|
||||
|
||||
func (hi *HostInfo) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(value, hi)
|
||||
|
||||
case string:
|
||||
return json.Unmarshal([]byte(value), hi)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (hi HostInfo) Value() (driver.Value, error) {
|
||||
bytes, err := json.Marshal(hi)
|
||||
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
type IPPrefix netip.Prefix
|
||||
|
||||
func (i *IPPrefix) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case string:
|
||||
prefix, err := netip.ParsePrefix(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = IPPrefix(prefix)
|
||||
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrCannotParsePrefix, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (i IPPrefix) Value() (driver.Value, error) {
|
||||
prefixStr := netip.Prefix(i).String()
|
||||
|
||||
return prefixStr, nil
|
||||
}
|
||||
|
||||
type IPPrefixes []netip.Prefix
|
||||
|
||||
func (i *IPPrefixes) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(value, i)
|
||||
|
||||
case string:
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (i IPPrefixes) Value() (driver.Value, error) {
|
||||
bytes, err := json.Marshal(i)
|
||||
|
||||
return string(bytes), err
|
||||
}
|
||||
|
||||
type StringList []string
|
||||
|
||||
func (i *StringList) Scan(destination interface{}) error {
|
||||
switch value := destination.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(value, i)
|
||||
|
||||
case string:
|
||||
return json.Unmarshal([]byte(value), i)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("%w: unexpected data type %T", ErrMachineAddressesInvalid, destination)
|
||||
}
|
||||
}
|
||||
|
||||
// Value return json value, implement driver.Valuer interface.
|
||||
func (i StringList) Value() (driver.Value, error) {
|
||||
bytes, err := json.Marshal(i)
|
||||
|
||||
return string(bytes), err
|
||||
}
|
394
dns_test.go
@@ -1,394 +0,0 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/dnstype"
|
||||
)
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains100(c *check.C) {
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("100.64.0.0/10"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
if domain == "64.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "100.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "127.100.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomains172(c *check.C) {
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("172.16.0.0/16"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
found := false
|
||||
for _, domain := range domains {
|
||||
if domain == "0.16.172.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
|
||||
found = false
|
||||
for _, domain := range domains {
|
||||
if domain == "255.16.172.in-addr.arpa." {
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
c.Assert(found, check.Equals, true)
|
||||
}
|
||||
|
||||
// Happens when netmask is a multiple of 4 bits (sounds likely).
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6Single(c *check.C) {
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("fd7a:115c:a1e0::/48"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
c.Assert(len(domains), check.Equals, 1)
|
||||
c.Assert(
|
||||
domains[0].WithTrailingDot(),
|
||||
check.Equals,
|
||||
"0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa.",
|
||||
)
|
||||
}
|
||||
|
||||
func (s *Suite) TestMagicDNSRootDomainsIPv6SingleMultiple(c *check.C) {
|
||||
prefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("fd7a:115c:a1e0::/50"),
|
||||
}
|
||||
domains := generateMagicDNSRootDomains(prefixes)
|
||||
|
||||
yieldsRoot := func(dom string) bool {
|
||||
for _, candidate := range domains {
|
||||
if candidate.WithTrailingDot() == dom {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
c.Assert(len(domains), check.Equals, 4)
|
||||
c.Assert(yieldsRoot("0.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("1.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("2.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
c.Assert(yieldsRoot("3.0.e.1.a.c.5.1.1.a.7.d.f.ip6.arpa."), check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithMagicDNS(c *check.C) {
|
||||
userShared1, err := app.CreateUser("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
userShared2, err := app.CreateUser("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
userShared3, err := app.CreateUser("shared3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared1, err := app.CreatePreAuthKey(
|
||||
userShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared2, err := app.CreatePreAuthKey(
|
||||
userShared2.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared3, err := app.CreatePreAuthKey(
|
||||
userShared3.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
PreAuthKey2InShared1, err := app.CreatePreAuthKey(
|
||||
userShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Hostname: "test_get_shared_nodes_1",
|
||||
UserID: userShared1.ID,
|
||||
User: *userShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, machineInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Hostname: "test_get_shared_nodes_2",
|
||||
UserID: userShared2.ID,
|
||||
User: *userShared2,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(userShared2.Name, machineInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Hostname: "test_get_shared_nodes_3",
|
||||
UserID: userShared3.ID,
|
||||
User: *userShared3,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(userShared3.Name, machineInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Hostname: "test_get_shared_nodes_4",
|
||||
UserID: userShared1.ID,
|
||||
User: *userShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(PreAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]*dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: true,
|
||||
}
|
||||
|
||||
peersOfMachineInShared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachineInShared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 3)
|
||||
|
||||
domainRouteShared1 := fmt.Sprintf("%s.%s", userShared1.Name, baseDomain)
|
||||
_, ok := dnsConfig.Routes[domainRouteShared1]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
|
||||
domainRouteShared2 := fmt.Sprintf("%s.%s", userShared2.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared2]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
|
||||
domainRouteShared3 := fmt.Sprintf("%s.%s", userShared3.Name, baseDomain)
|
||||
_, ok = dnsConfig.Routes[domainRouteShared3]
|
||||
c.Assert(ok, check.Equals, true)
|
||||
}
|
||||
|
||||
func (s *Suite) TestDNSConfigMapResponseWithoutMagicDNS(c *check.C) {
|
||||
userShared1, err := app.CreateUser("shared1")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
userShared2, err := app.CreateUser("shared2")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
userShared3, err := app.CreateUser("shared3")
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared1, err := app.CreatePreAuthKey(
|
||||
userShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared2, err := app.CreatePreAuthKey(
|
||||
userShared2.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKeyInShared3, err := app.CreatePreAuthKey(
|
||||
userShared3.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
preAuthKey2InShared1, err := app.CreatePreAuthKey(
|
||||
userShared1.Name,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, "test_get_shared_nodes_1")
|
||||
c.Assert(err, check.NotNil)
|
||||
|
||||
machineInShared1 := &Machine{
|
||||
ID: 1,
|
||||
MachineKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
NodeKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
DiscoKey: "686824e749f3b7f2a5927ee6c1e422aee5292592d9179a271ed7b3e659b44a66",
|
||||
Hostname: "test_get_shared_nodes_1",
|
||||
UserID: userShared1.ID,
|
||||
User: *userShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.1")},
|
||||
AuthKeyID: uint(preAuthKeyInShared1.ID),
|
||||
}
|
||||
app.db.Save(machineInShared1)
|
||||
|
||||
_, err = app.GetMachine(userShared1.Name, machineInShared1.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared2 := &Machine{
|
||||
ID: 2,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Hostname: "test_get_shared_nodes_2",
|
||||
UserID: userShared2.ID,
|
||||
User: *userShared2,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.2")},
|
||||
AuthKeyID: uint(preAuthKeyInShared2.ID),
|
||||
}
|
||||
app.db.Save(machineInShared2)
|
||||
|
||||
_, err = app.GetMachine(userShared2.Name, machineInShared2.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machineInShared3 := &Machine{
|
||||
ID: 3,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Hostname: "test_get_shared_nodes_3",
|
||||
UserID: userShared3.ID,
|
||||
User: *userShared3,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.3")},
|
||||
AuthKeyID: uint(preAuthKeyInShared3.ID),
|
||||
}
|
||||
app.db.Save(machineInShared3)
|
||||
|
||||
_, err = app.GetMachine(userShared3.Name, machineInShared3.Hostname)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
machine2InShared1 := &Machine{
|
||||
ID: 4,
|
||||
MachineKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
NodeKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
DiscoKey: "dec46ef9dc45c7d2f03bfcd5a640d9e24e3cc68ce3d9da223867c9bc6d5e9863",
|
||||
Hostname: "test_get_shared_nodes_4",
|
||||
UserID: userShared1.ID,
|
||||
User: *userShared1,
|
||||
RegisterMethod: RegisterMethodAuthKey,
|
||||
IPAddresses: []netip.Addr{netip.MustParseAddr("100.64.0.4")},
|
||||
AuthKeyID: uint(preAuthKey2InShared1.ID),
|
||||
}
|
||||
app.db.Save(machine2InShared1)
|
||||
|
||||
baseDomain := "foobar.headscale.net"
|
||||
dnsConfigOrig := tailcfg.DNSConfig{
|
||||
Routes: make(map[string][]*dnstype.Resolver),
|
||||
Domains: []string{baseDomain},
|
||||
Proxied: false,
|
||||
}
|
||||
|
||||
peersOfMachine1Shared1, err := app.getPeers(machineInShared1)
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
dnsConfig := getMapResponseDNSConfig(
|
||||
&dnsConfigOrig,
|
||||
baseDomain,
|
||||
*machineInShared1,
|
||||
peersOfMachine1Shared1,
|
||||
)
|
||||
c.Assert(dnsConfig, check.NotNil)
|
||||
c.Assert(len(dnsConfig.Routes), check.Equals, 0)
|
||||
c.Assert(len(dnsConfig.Domains), check.Equals, 1)
|
||||
}
|
@@ -18,23 +18,25 @@ An example use case is to serve apps on the same host via a reverse proxy like N
|
||||
|
||||
1. Change the `config.yaml` to contain the desired records like so:
|
||||
|
||||
```yaml
|
||||
dns_config:
|
||||
...
|
||||
extra_records:
|
||||
- name: "prometheus.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
```yaml
|
||||
dns_config:
|
||||
...
|
||||
extra_records:
|
||||
- name: "prometheus.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
|
||||
- name: "grafana.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
...
|
||||
```
|
||||
- name: "grafana.myvpn.example.com"
|
||||
type: "A"
|
||||
value: "100.64.0.3"
|
||||
...
|
||||
```
|
||||
|
||||
2. Restart your headscale instance.
|
||||
1. Restart your headscale instance.
|
||||
|
||||
Beware of the limitations listed later on!
|
||||
!!! warning
|
||||
|
||||
Beware of the limitations listed later on!
|
||||
|
||||
### 2. Verify that the records are set
|
||||
|
||||
|
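From a node connected to the tailnet, a quick check can be done with `dig` (a sketch; `prometheus.myvpn.example.com` and `100.64.0.3` are the example values from the config above, and `100.100.100.100` is Tailscale's MagicDNS resolver address):

```shell
# Ask the MagicDNS resolver for the extra record added above
dig +short prometheus.myvpn.example.com @100.100.100.100
# expected output: 100.64.0.3
```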
@@ -14,6 +14,8 @@ If the node is already registered, it can advertise exit capabilities like this:
|
||||
$ sudo tailscale set --advertise-exit-node
|
||||
```
|
||||
|
||||
To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding.
|
||||
|
||||
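As a minimal sketch (assuming a typical Linux host; the linked Tailscale documentation is authoritative for other distributions), IP forwarding can be enabled with sysctl:

```shell
# Enable IPv4 and IPv6 forwarding and persist it across reboots
echo 'net.ipv4.ip_forward = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
sudo sysctl -p /etc/sysctl.d/99-tailscale.conf
```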
## On the control server
|
||||
|
||||
```console
|
||||
|
57
docs/faq.md
Normal file
@@ -0,0 +1,57 @@
|
||||
---
|
||||
hide:
|
||||
- navigation
|
||||
---
|
||||
|
||||
# Frequently Asked Questions
|
||||
|
||||
## What is the design goal of headscale?
|
||||
|
||||
`headscale` aims to implement a self-hosted, open source alternative to the [Tailscale](https://tailscale.com/)
|
||||
control server.
|
||||
`headscale`'s goal is to provide self-hosters and hobbyists with an open-source
|
||||
server they can use for their projects and labs.
|
||||
It implements a narrow scope, a _single_ Tailnet, suitable for personal use or a small
|
||||
open-source organisation.
|
||||
|
||||
## How can I contribute?
|
||||
|
||||
Headscale is "Open Source, acknowledged contribution", this means that any
|
||||
contribution will have to be discussed with the Maintainers before being submitted.
|
||||
|
||||
Headscale is open to code contributions for bug fixes without discussion.
|
||||
|
||||
If you find mistakes in the documentation, please also submit a fix to the documentation.
|
||||
|
||||
## Why is 'acknowledged contribution' the chosen model?
|
||||
|
||||
Both maintainers have full-time jobs and families, and we want to avoid burnout. We also want to avoid frustration from contributors when their PRs are not accepted.
|
||||
|
||||
We are more than happy to exchange emails, or to have dedicated calls before a PR is submitted.
|
||||
|
||||
## When/Why is Feature X going to be implemented?
|
||||
|
||||
We don't know. We might be working on it. If you want to help, please send us a PR.
|
||||
|
||||
Please be aware that there are a number of reasons why we might not accept specific contributions:
|
||||
|
||||
- It is not possible to implement the feature in a way that makes sense in a self-hosted environment.
|
||||
- Given that we are reverse-engineering Tailscale to satisfy our own curiosity, we might be interested in implementing the feature ourselves.
|
||||
- You are not sending unit and integration tests with it.
|
||||
|
||||
## Do you support Y method of deploying Headscale?
|
||||
|
||||
We currently support deploying `headscale` using our binaries and the DEB packages. Both can be found in the
|
||||
[GitHub releases page](https://github.com/juanfont/headscale/releases).
|
||||
|
||||
In addition to that, there are semi-official RPM packages by the Fedora infra team https://copr.fedorainfracloud.org/coprs/jonathanspw/headscale/
|
||||
|
||||
For convenience, we also build Docker images with `headscale`. But **please be aware that we don't officially support deploying `headscale` using Docker**. We have a [Discord channel](https://discord.com/channels/896711691637780480/1070619770942148618) where you can ask for Docker-specific help to the community.
|
||||
|
||||
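For the semi-official RPM packages mentioned above, installation typically looks like the following (a sketch; it assumes `dnf-plugins-core` is installed, and the Copr page has the authoritative instructions):

```shell
# Enable the community Copr repository and install headscale
sudo dnf copr enable jonathanspw/headscale
sudo dnf install headscale
```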
## Why is my reverse proxy not working with Headscale?
|
||||
|
||||
We don't know. We don't use reverse proxies with `headscale` ourselves, so we don't have any experience with them. We have [community documentation](https://headscale.net/reverse-proxy/) on how to configure various reverse proxies, and a dedicated [Discord channel](https://discord.com/channels/896711691637780480/1070619818346164324) where you can ask for help to the community.
|
||||
|
||||
## Can I use headscale and tailscale on the same machine?
|
||||
|
||||
Running headscale on a machine that is also in the tailnet can cause problems with subnet routers, traffic relay nodes, and MagicDNS. It might work, but it is not supported.
|
@@ -4,9 +4,40 @@ hide:
|
||||
- toc
|
||||
---
|
||||
|
||||
# headscale documentation
|
||||
# headscale
|
||||
|
||||
This site contains the official and community contributed documentation for `headscale`.
|
||||
`headscale` is an open source, self-hosted implementation of the Tailscale control server.
|
||||
|
||||
If you are having trouble following the documentation or get unexpected results,
|
||||
please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Issue.
|
||||
This page contains the documentation for the latest version of headscale. Please also check our [FAQ](/faq/).
|
||||
|
||||
Join our [Discord](https://discord.gg/c84AZQhmpx) server for a chat and community support.
|
||||
|
||||
## Design goal
|
||||
|
||||
Headscale aims to implement a self-hosted, open source alternative to the Tailscale
|
||||
control server.
|
||||
Headscale's goal is to provide self-hosters and hobbyists with an open-source
|
||||
server they can use for their projects and labs.
|
||||
It implements a narrower scope, a single Tailnet, suitable for personal use or a small
|
||||
open-source organisation.
|
||||
|
||||
## Supporting headscale
|
||||
|
||||
If you like `headscale` and find it useful, there are sponsorship and donation
|
||||
buttons available in the repo.
|
||||
|
||||
## Contributing
|
||||
|
||||
Headscale is "Open Source, acknowledged contribution", this means that any
|
||||
contribution will have to be discussed with the Maintainers before being submitted.
|
||||
|
||||
This model has been chosen to reduce the risk of burnout by limiting the
|
||||
maintenance overhead of reviewing and validating third-party code.
|
||||
|
||||
Headscale is open to code contributions for bug fixes without discussion.
|
||||
|
||||
If you find mistakes in the documentation, please submit a fix to the documentation.
|
||||
|
||||
## About
|
||||
|
||||
`headscale` is maintained by [Kristoffer Dalby](https://kradalby.no/) and [Juan Font](https://font.eu).
|
||||
|
@@ -24,6 +24,8 @@ oidc:
|
||||
# It resolves environment variables, making integration to systemd's
|
||||
# `LoadCredential` straightforward:
|
||||
#client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
|
||||
# as third option, it's also possible to load the oidc secret from environment variables
|
||||
# set HEADSCALE_OIDC_CLIENT_SECRET to the required value
|
||||
|
||||
# Customize the scopes used in the OIDC flow, defaults to "openid", "profile" and "email" and add custom query
|
||||
# parameters to the Authorize Endpoint request. Scopes default to "openid", "profile" and "email".
|
||||
|
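As a sketch of the environment-variable option mentioned in the comments above (the variable name is the one given there; the secret value is a placeholder):

```shell
# Provide the OIDC client secret via the environment instead of the config file
export HEADSCALE_OIDC_CLIENT_SECRET="<your-client-secret>"
headscale serve
```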
@@ -16,7 +16,7 @@ WorkingDirectory=/var/lib/headscale
|
||||
ReadWritePaths=/var/lib/headscale /var/run
|
||||
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN
|
||||
CapabilityBoundingSet=CAP_CHOWN
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN
|
||||
LockPersonality=true
|
||||
NoNewPrivileges=true
|
||||
PrivateDevices=true
|
||||
@@ -26,7 +26,6 @@ ProcSubset=pid
|
||||
ProtectClock=true
|
||||
ProtectControlGroups=true
|
||||
ProtectHome=true
|
||||
ProtectHome=yes
|
||||
ProtectHostname=true
|
||||
ProtectKernelLogs=true
|
||||
ProtectKernelModules=true
|
||||
|
@@ -64,6 +64,7 @@ summary() {
|
||||
echo ""
|
||||
echo " Please follow the next steps to start the software:"
|
||||
echo ""
|
||||
echo " sudo systemctl enable headscale"
|
||||
echo " sudo systemctl start headscale"
|
||||
echo ""
|
||||
echo " Configuration settings can be adjusted here:"
|
||||
|
5
docs/requirements.txt
Normal file
@@ -0,0 +1,5 @@
|
||||
cairosvg~=2.7.1
|
||||
mkdocs-material~=9.4.14
|
||||
mkdocs-minify-plugin~=0.7.1
|
||||
pillow~=10.1.0
|
||||
|
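These pinned packages can be used to build and preview the documentation locally; a sketch, assuming the `mkdocs.yml` in the repository root and a recent Python:

```shell
# Install the documentation toolchain and serve the site on localhost
pip install -r docs/requirements.txt
mkdocs serve
```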
@@ -33,8 +33,7 @@ The following example configuration can be used in your nginx setup, substitutin
|
||||
|
||||
```Nginx
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default keep-alive;
|
||||
'websocket' upgrade;
|
||||
default upgrade;
|
||||
'' close;
|
||||
}
|
||||
|
||||
@@ -61,7 +60,7 @@ server {
|
||||
proxy_buffering off;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
|
||||
}
|
||||
}
|
||||
|
@@ -17,100 +17,124 @@ not work with alternatives like [Podman](https://podman.io). The Docker image ca
|
||||
|
||||
1. Prepare a directory on the host Docker node in your directory of choice, used to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:
|
||||
|
||||
```shell
|
||||
mkdir -p ./headscale/config
|
||||
cd ./headscale
|
||||
```
|
||||
```shell
|
||||
mkdir -p ./headscale/config
|
||||
cd ./headscale
|
||||
```
|
||||
|
||||
2. Create an empty SQLite database in the headscale directory:
|
||||
1. Create an empty SQLite database in the headscale directory:
|
||||
|
||||
```shell
|
||||
touch ./config/db.sqlite
|
||||
```
|
||||
```shell
|
||||
touch ./config/db.sqlite
|
||||
```
|
||||
|
||||
3. **(Strongly Recommended)** Download a copy of the [example configuration][config-example.yaml](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.
|
||||
1. **(Strongly Recommended)** Download a copy of the [example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.
|
||||
|
||||
Using wget:
|
||||
- Using `wget`:
|
||||
|
||||
```shell
|
||||
wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
|
||||
```
|
||||
```shell
|
||||
wget -O ./config/config.yaml https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml
|
||||
```
|
||||
|
||||
Using curl:
|
||||
- Using `curl`:
|
||||
|
||||
```shell
|
||||
curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml
|
||||
```
|
||||
```shell
|
||||
curl https://raw.githubusercontent.com/juanfont/headscale/main/config-example.yaml -o ./config/config.yaml
|
||||
```
|
||||
|
||||
**(Advanced)** If you would like to hand craft a config file **instead** of downloading the example config file, create a blank `headscale` configuration in the headscale directory to edit:
|
||||
- **(Advanced)** If you would like to hand craft a config file **instead** of downloading the example config file, create a blank `headscale` configuration in the headscale directory to edit:
|
||||
|
||||
```shell
|
||||
touch ./config/config.yaml
|
||||
```
|
||||
```shell
|
||||
touch ./config/config.yaml
|
||||
```
|
||||
|
||||
Modify the config file to your preferences before launching the Docker container.
|
||||
Here are some settings that you likely want:
|
||||
Modify the config file to your preferences before launching the Docker container.
|
||||
Here are some settings that you likely want:
|
||||
|
||||
```yaml
|
||||
# Change to your hostname or host IP
|
||||
server_url: http://your-host-name:8080
|
||||
# Listen to 0.0.0.0 so it's accessible outside the container
|
||||
metrics_listen_addr: 0.0.0.0:9090
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
private_key_path: /etc/headscale/private.key
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
noise:
|
||||
private_key_path: /etc/headscale/noise_private.key
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
db_type: sqlite3
|
||||
db_path: /etc/headscale/db.sqlite
|
||||
```
|
||||
```yaml
|
||||
# Change to your hostname or host IP
|
||||
server_url: http://your-host-name:8080
|
||||
# Listen to 0.0.0.0 so it's accessible outside the container
|
||||
metrics_listen_addr: 0.0.0.0:9090
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
noise:
|
||||
private_key_path: /etc/headscale/noise_private.key
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
derp:
|
||||
private_key_path: /etc/headscale/private.key
|
||||
# The default /var/run/headscale path is not writable in the container
|
||||
unix_socket: /etc/headscale/headscale.sock
|
||||
# The default /var/lib/headscale path is not writable in the container
|
||||
database.type: sqlite3
|
||||
database.sqlite.path: /etc/headscale/db.sqlite
|
||||
```
|
||||
|
||||
1. Start the headscale server while working in the host headscale directory:

   ```shell
   docker run \
     --name headscale \
     --detach \
     --volume $(pwd)/config:/etc/headscale/ \
     --publish 127.0.0.1:8080:8080 \
     --publish 127.0.0.1:9090:9090 \
     headscale/headscale:<VERSION> \
     headscale serve
   ```

   Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally.

   This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
   `headscale` instance becomes available and then detach so headscale runs in the background.

   Alternatively, you can mount `/var/lib` and `/var/run` from your host system by adding
   `--volume $(pwd)/lib:/var/lib/headscale` and `--volume $(pwd)/run:/var/run/headscale`
   to the command; a sketch of this is shown below.
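   A sketch of the same command with those two extra bind mounts added (the `lib/` and `run/` directory names are taken from the note above; adjust them to your own layout):

   ```shell
   mkdir -p ./lib ./run

   docker run \
     --name headscale \
     --detach \
     --volume $(pwd)/config:/etc/headscale/ \
     --volume $(pwd)/lib:/var/lib/headscale \
     --volume $(pwd)/run:/var/run/headscale \
     --publish 127.0.0.1:8080:8080 \
     --publish 127.0.0.1:9090:9090 \
     headscale/headscale:<VERSION> \
     headscale serve
   ```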
   Example `docker-compose.yaml`:

   ```yaml
   version: "3.7"

   services:
     headscale:
       image: headscale/headscale:0.22.3
       restart: unless-stopped
       container_name: headscale
       ports:
         - "127.0.0.1:8080:8080"
         - "127.0.0.1:9090:9090"
       volumes:
         # Please change [config_path] to the full path of the config folder just created
         - [config_path]:/etc/headscale
       command: headscale serve
   ```
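   If you prefer Compose, the file above can be brought up with the standard Compose workflow (a usage sketch; the service name `headscale` comes from the example above):

   ```shell
   docker compose up --detach headscale

   # and tail its logs
   docker compose logs --follow headscale
   ```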
1. Verify `headscale` is running:

   Follow the container logs:

   ```shell
   docker logs --follow headscale
   ```

   Verify running containers:

   ```shell
   docker ps
   ```

   Verify `headscale` is available:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```

1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

   ```shell
   docker exec headscale \
     headscale users create myfirstuser
   ```
### Register a machine (normal login)
docs/running-headscale-linux-manual.md (new file, 197 lines)
@@ -0,0 +1,197 @@
# Running headscale on Linux

## Note: Outdated and "advanced"

This is the "legacy"/advanced/manual version of the documentation. You most likely do not
want to use it; look at the distro-specific documentation instead (TODO LINK)[].

## Goal

This documentation has the goal of showing a user how-to set up and run `headscale` on Linux.
In addition to the "get up and running" section, there is an optional [SystemD section](#running-headscale-in-the-background-with-systemd)
describing how to make `headscale` run properly in a server environment.

## Configure and run `headscale`

1. Download the latest [`headscale` binary from GitHub's release page](https://github.com/juanfont/headscale/releases):

   ```shell
   wget --output-document=/usr/local/bin/headscale \
     https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>
   ```

1. Make `headscale` executable:

   ```shell
   chmod +x /usr/local/bin/headscale
   ```

1. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:

   ```shell
   # Directory for configuration
   mkdir -p /etc/headscale

   # Directory for the database and other variable data (like certificates)
   mkdir -p /var/lib/headscale

   # or, if you create a dedicated headscale user:
   useradd \
     --create-home \
     --home-dir /var/lib/headscale/ \
     --system \
     --user-group \
     --shell /usr/sbin/nologin \
     headscale
   ```
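   If you created the dedicated `headscale` user and plan to run the service as that user (see the SystemD section below), the state directory should be owned by it. A small sketch of that, assuming the user and group names from the step above:

   ```shell
   # Hand the variable-data directory over to the headscale user
   chown -R headscale:headscale /var/lib/headscale
   ```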
1. Create an empty SQLite database:

   ```shell
   touch /var/lib/headscale/db.sqlite
   ```

1. Create a `headscale` configuration:

   ```shell
   touch /etc/headscale/config.yaml
   ```

   **(Strongly Recommended)** Download a copy of the [example configuration (`config-example.yaml`)](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.

1. Start the headscale server:

   ```shell
   headscale serve
   ```

   This command will start `headscale` in the current terminal session.

   ---

   To continue the tutorial, open a new terminal and let it run in the background.
   Alternatively use terminal multiplexers like [tmux](https://github.com/tmux/tmux) or [screen](https://www.gnu.org/software/screen/).

   To run `headscale` in the background, please follow the steps in the [SystemD section](#running-headscale-in-the-background-with-systemd) before continuing.

1. Verify `headscale` is running:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```

1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

   ```shell
   headscale users create myfirstuser
   ```

### Register a machine (normal login)

On a client machine, execute the `tailscale` login command:

```shell
tailscale up --login-server YOUR_HEADSCALE_URL
```

Register the machine:

```shell
headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY>
```

### Register machine using a pre authenticated key

Generate a key using the command line:

```shell
headscale --user myfirstuser preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that can be used to connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```

## Running `headscale` in the background with SystemD

:warning: **Deprecated**: This part is very outdated and you should use the [pre-packaged Headscale for this](./running-headscale-linux.md).

This section demonstrates how to run `headscale` as a service in the background with [SystemD](https://www.freedesktop.org/wiki/Software/systemd/).
This should work on most modern Linux distributions.

1. Create a SystemD service configuration at `/etc/systemd/system/headscale.service` containing:

   ```systemd
   [Unit]
   Description=headscale controller
   After=syslog.target
   After=network.target

   [Service]
   Type=simple
   User=headscale
   Group=headscale
   ExecStart=/usr/local/bin/headscale serve
   Restart=always
   RestartSec=5

   # Optional security enhancements
   NoNewPrivileges=yes
   PrivateTmp=yes
   ProtectSystem=strict
   ProtectHome=yes
   WorkingDirectory=/var/lib/headscale
   ReadWritePaths=/var/lib/headscale /var/run/headscale
   AmbientCapabilities=CAP_NET_BIND_SERVICE
   RuntimeDirectory=headscale

   [Install]
   WantedBy=multi-user.target
   ```

   Note that when running as the headscale user, ensure that you either add your current user to the headscale group:

   ```shell
   usermod -a -G headscale current_user
   ```

   or run all headscale commands as the headscale user:

   ```shell
   su - headscale
   ```

1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the `headscale` user or group:

   ```yaml
   unix_socket: /var/run/headscale/headscale.sock
   ```

1. Reload SystemD to load the new configuration file:

   ```shell
   systemctl daemon-reload
   ```

1. Enable and start the new `headscale` service:

   ```shell
   systemctl enable --now headscale
   ```

1. Verify the headscale service:

   ```shell
   systemctl status headscale
   ```

   Verify `headscale` is available:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```
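   As an additional check, the CLI should be able to reach the daemon over the unix socket configured above (a sketch, assuming it is run as the `headscale` user or a member of the `headscale` group):

   ```shell
   # Prints an (initially empty) list of nodes if the socket is reachable
   headscale nodes list
   ```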
`headscale` will now run in the background and start at boot.
@@ -1,83 +1,65 @@
# Running headscale on Linux

## Requirements

- Ubuntu 20.04 or newer, Debian 11 or newer.

## Goal

Get Headscale up and running.

This includes running Headscale with SystemD.

## Migrating from manual install

If you are migrating from the old manual install, the best thing would be to remove
the files installed by following [the guide in reverse](./running-headscale-linux-manual.md).

You should _not_ delete the database (`/var/lib/headscale/db.sqlite`) and the
configuration (`/etc/headscale/config.yaml`).

## Installation

1. Download the latest Headscale package for your platform (`.deb` for Ubuntu and Debian) from [Headscale's releases page](https://github.com/juanfont/headscale/releases):

   ```shell
   wget --output-document=headscale.deb \
     https://github.com/juanfont/headscale/releases/download/v<HEADSCALE VERSION>/headscale_<HEADSCALE VERSION>_linux_<ARCH>.deb
   ```

1. Install Headscale:

   ```shell
   sudo apt install ./headscale.deb
   ```

1. Enable the Headscale service; this will start Headscale at boot:

   ```shell
   sudo systemctl enable headscale
   ```

1. Configure Headscale by editing the configuration file:

   ```shell
   nano /etc/headscale/config.yaml
   ```

1. Start Headscale:

   ```shell
   sudo systemctl start headscale
   ```

1. Check that Headscale is running as intended:

   ```shell
   systemctl status headscale
   ```
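   If the service is not healthy, a quick way to inspect its logs (a sketch, assuming journald is in use, as on stock Ubuntu and Debian installs):

   ```shell
   # Follow the headscale service logs
   journalctl --unit headscale --follow
   ```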
## Using Headscale

### Create a user

```shell
headscale users create myfirstuser
```
@@ -85,16 +67,16 @@ headscale users create myfirstuser
### Register a machine (normal login)

On a client machine, run the `tailscale` login command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL>
```

Register the machine:

```shell
headscale --user myfirstuser nodes register --key <YOUR_MACHINE_KEY>
```

### Register machine using a pre authenticated key

@@ -105,87 +87,9 @@ Generate a key using the command line:

```shell
headscale --user myfirstuser preauthkeys create --reusable --expiration 24h
```

This will return a pre-authenticated key that is used to
connect a node to `headscale` during the `tailscale` command:

```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
```
@@ -15,115 +15,116 @@ describing how to make `headscale` run properly in a server environment.
## Install `headscale`

1. Install from ports (not recommended)

   !!! info

       As of OpenBSD 7.2, there's a headscale in the ports collection, however, it's severely outdated (v0.12.4). You can install it via `pkg_add headscale`.

1. Install from source on OpenBSD 7.2

   ```shell
   # Install prerequisites
   pkg_add go

   git clone https://github.com/juanfont/headscale.git

   cd headscale

   # optionally check out a release
   # option a. you can find official releases at https://github.com/juanfont/headscale/releases/latest
   # option b. get the latest tag, this may be a beta release
   latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)

   git checkout $latestTag

   go build -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$latestTag" github.com/juanfont/headscale

   # make it executable
   chmod a+x headscale

   # copy it to /usr/local/sbin
   cp headscale /usr/local/sbin
   ```

1. Install from source via cross compile

   ```shell
   # Install prerequisites
   # 1. go v1.20+: headscale newer than 0.21 needs go 1.20+ to compile
   # 2. gmake: the Makefile in the headscale repo is written in GNU make syntax

   git clone https://github.com/juanfont/headscale.git

   cd headscale

   # optionally check out a release
   # option a. you can find official releases at https://github.com/juanfont/headscale/releases/latest
   # option b. get the latest tag, this may be a beta release
   latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)

   git checkout $latestTag

   make build GOOS=openbsd

   # copy headscale to the OpenBSD machine and put it in /usr/local/sbin (see the sketch below)
   ```
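   To get the cross-compiled binary onto the OpenBSD machine, something like the following works (a sketch; `openbsd-host` is a placeholder for your own host name):

   ```shell
   # Copy the freshly built binary over SSH (hypothetical host name)
   scp ./headscale root@openbsd-host:/usr/local/sbin/headscale

   # Make sure it is executable on the OpenBSD machine
   ssh root@openbsd-host 'chmod a+x /usr/local/sbin/headscale'
   ```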
## Configure and run `headscale`

1. Prepare a directory to hold `headscale` configuration and the [SQLite](https://www.sqlite.org/) database:

   ```shell
   # Directory for configuration
   mkdir -p /etc/headscale

   # Directory for the database and other variable data (like certificates)
   mkdir -p /var/lib/headscale
   ```

1. Create an empty SQLite database:

   ```shell
   touch /var/lib/headscale/db.sqlite
   ```

1. Create a `headscale` configuration:

   ```shell
   touch /etc/headscale/config.yaml
   ```

   **(Strongly Recommended)** Download a copy of the [example configuration (`config-example.yaml`)](https://github.com/juanfont/headscale/blob/main/config-example.yaml) from the headscale repository.

1. Start the headscale server:

   ```shell
   headscale serve
   ```

   This command will start `headscale` in the current terminal session.

   ***

   To continue the tutorial, open a new terminal and let it run in the background.
   Alternatively use terminal multiplexers like [tmux](https://github.com/tmux/tmux).

   To run `headscale` in the background, please follow the steps in the [rc.d section](#running-headscale-in-the-background-with-rcd) before continuing.

1. Verify `headscale` is running:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```

1. Create a user ([tailnet](https://tailscale.com/kb/1136/tailnet/)):

   ```shell
   headscale users create myfirstuser
   ```

### Register a machine (normal login)
@@ -159,51 +160,51 @@ This section demonstrates how to run `headscale` as a service in the background
1. Create a rc.d service at `/etc/rc.d/headscale` containing:

   ```shell
   #!/bin/ksh

   daemon="/usr/local/sbin/headscale"
   daemon_logger="daemon.info"
   daemon_user="root"
   daemon_flags="serve"
   daemon_timeout=60

   . /etc/rc.d/rc.subr

   rc_bg=YES
   rc_reload=NO

   rc_cmd $1
   ```

1. `/etc/rc.d/headscale` needs execute permission:

   ```shell
   chmod a+x /etc/rc.d/headscale
   ```

1. Start the `headscale` service:

   ```shell
   rcctl start headscale
   ```

1. Make the `headscale` service start at boot:

   ```shell
   rcctl enable headscale
   ```

1. Verify the headscale service:

   ```shell
   rcctl check headscale
   ```

   Verify `headscale` is available:

   ```shell
   curl http://127.0.0.1:9090/metrics
   ```

`headscale` will now run in the background and start at boot.
docs/tls.md (65 lines)
@@ -1,8 +1,17 @@
# Running the service via TLS (optional)

## Bring your own certificate

Headscale can be configured to expose its web service via TLS. To configure the certificate and key file manually, set the `tls_cert_path` and `tls_key_path` configuration parameters. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```yaml
tls_cert_path: ""
tls_key_path: ""
```

## Let's Encrypt / ACME

To get a certificate automatically via [Let's Encrypt](https://letsencrypt.org/), set `tls_letsencrypt_hostname` to the desired certificate hostname. This name must resolve to the IP address(es) headscale is reachable on (i.e., it must correspond to the `server_url` configuration parameter). The certificate and Let's Encrypt account credentials will be stored in the directory configured in `tls_letsencrypt_cache_dir`. If the path is relative, it will be interpreted as relative to the directory the configuration file was read from.

```yaml
tls_letsencrypt_hostname: ""
@@ -11,21 +20,57 @@ tls_letsencrypt_cache_dir: ".cache"
tls_letsencrypt_challenge_type: HTTP-01
```

### Challenge types

Headscale only supports two values for `tls_letsencrypt_challenge_type`: `HTTP-01` (default) and `TLS-ALPN-01`.

#### HTTP-01

For `HTTP-01`, headscale must be reachable on port 80 for the Let's Encrypt automated validation, in addition to whatever port is configured in `listen_addr`. By default, headscale listens on port 80 on all local IPs for Let's Encrypt automated validation.

If you need to change the ip and/or port used by headscale for the Let's Encrypt validation process, set `tls_letsencrypt_listen` to the appropriate value. This can be handy if you are running headscale as a non-root user (or can't run `setcap`). Keep in mind, however, that Let's Encrypt will _only_ connect to port 80 for the validation callback, so if you change `tls_letsencrypt_listen` you will also need to configure something else (e.g. a firewall rule) to forward the traffic from port 80 to the ip:port combination specified in `tls_letsencrypt_listen`.
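For example, a minimal sketch of such a setup (the hostname and port 9080 below are illustrative values, not defaults): run the validation listener on an unprivileged port and redirect port 80 to it with a firewall rule:

```yaml
tls_letsencrypt_hostname: "headscale.example.com"
# Unprivileged ip:port for the HTTP-01 validation listener (illustrative)
tls_letsencrypt_listen: "0.0.0.0:9080"
```

```shell
# One possible firewall rule (iptables shown here): redirect incoming port 80 to the listener above
iptables -t nat -A PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 9080
```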
#### TLS-ALPN-01

For `TLS-ALPN-01`, headscale listens on the ip:port combination defined in `listen_addr`. Let's Encrypt will _only_ connect to port 443 for the validation callback, so if `listen_addr` is not set to port 443, something else (e.g. a firewall rule) will be required to forward the traffic from port 443 to the ip:port combination specified in `listen_addr`.
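A minimal sketch of a `TLS-ALPN-01` configuration, assuming headscale itself is allowed to bind port 443 (the hostname is an illustrative value):

```yaml
listen_addr: 0.0.0.0:443
tls_letsencrypt_hostname: "headscale.example.com"
tls_letsencrypt_challenge_type: TLS-ALPN-01
```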
### Technical description

Headscale uses [autocert](https://pkg.go.dev/golang.org/x/crypto/acme/autocert), a Golang library providing [ACME protocol](https://en.wikipedia.org/wiki/Automatic_Certificate_Management_Environment) verification, to facilitate certificate renewals via [Let's Encrypt](https://letsencrypt.org/about/). Certificates will be renewed automatically, and the following can be expected:

- Certificates provided from Let's Encrypt have a validity of 3 months from the date issued.
- Renewals are only attempted by headscale when 30 days or less remain until certificate expiry.
- Renewal attempts by autocert are triggered at a random interval of 30-60 minutes.
- No log output is generated when renewals are skipped, or successful.

#### Checking certificate expiry

If you want to validate that certificate renewal completed successfully, this can be done either manually, or through external monitoring software. Two examples of doing this manually:

1. Open the URL for your Headscale server in your browser of choice, and manually inspect the expiry date of the certificate you receive.
2. Or, check remotely from the CLI using `openssl`:

   ```bash
   $ openssl s_client -servername [hostname] -connect [hostname]:443 | openssl x509 -noout -dates
   (...)
   notBefore=Feb  8 09:48:26 2024 GMT
   notAfter=May  8 09:48:25 2024 GMT
   ```

#### Log output from the autocert library

As these log lines are from the autocert library, they are not strictly generated by headscale itself.

```plaintext
acme/autocert: missing server name
```

Likely caused by an incoming connection that does not specify a hostname, for example a `curl` request directly against the IP of the server, or an unexpected hostname.

```plaintext
acme/autocert: host "[foo]" not configured in HostWhitelist
```

Similarly to the above, this likely indicates an invalid incoming request for an incorrect hostname, commonly just the IP itself.

The source code for autocert can be found [here](https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.19.0:acme/autocert/autocert.go).
docs/web-ui.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Headscale web interface

!!! warning "Community contributions"

    This page contains community contributions. The projects listed here are not
    maintained by the Headscale authors and are written by community members.

| Name            | Repository Link                                         | Description                                                                | Status |
| --------------- | ------------------------------------------------------- | -------------------------------------------------------------------------- | ------ |
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui)     | A simple Headscale web UI for small-scale deployments.                     | Alpha  |
| headscale-ui    | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server  | Alpha  |
| HeadscaleUi     | [GitHub](https://github.com/simcu/headscale-ui)          | A static headscale admin ui, no backend environment required               | Alpha  |

You can ask for support on our dedicated [Discord channel](https://discord.com/channels/896711691637780480/1105842846386356294).
@@ -18,6 +18,7 @@ You can set these using the Windows Registry Editor:
Or via the following PowerShell commands (right click the PowerShell icon and select "Run as administrator"):

```
New-Item -Path "HKLM:\SOFTWARE\Tailscale IPN"
New-ItemProperty -Path 'HKLM:\Software\Tailscale IPN' -Name UnattendedMode -PropertyType String -Value always
New-ItemProperty -Path 'HKLM:\Software\Tailscale IPN' -Name LoginURL -PropertyType String -Value https://YOUR-HEADSCALE-URL
```
flake.lock (generated, 30 lines)
@@ -1,12 +1,15 @@
|
||||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1680776469,
|
||||
"narHash": "sha256-3CXUDK/3q/kieWtdsYpDOBJw3Gw4Af6x+2EiSnIkNQw=",
|
||||
"lastModified": 1710146030,
|
||||
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "411e8764155aa9354dbcd6d5faaeb97e9e3dce24",
|
||||
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -17,11 +20,11 @@
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1680789907,
|
||||
"narHash": "sha256-0AOMkabjbOauxspnqfzqgLKhB2gSh3sLkz1p/jIckcs=",
|
||||
"lastModified": 1712883908,
|
||||
"narHash": "sha256-icE1IJE9fHcbDfJ0+qWoDdcBXUoZCcIJxME4lMHwvSM=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "9de84cd029054adc54fdc6442e121fbc5ac33baf",
|
||||
"rev": "a0c9e3aee1000ac2bfb0e5b98c94c946a5d180a9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
@@ -36,6 +39,21 @@
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
|
flake.nix (298 lines)
@@ -6,164 +6,168 @@
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
};
|
||||
|
||||
outputs =
|
||||
{ self
|
||||
, nixpkgs
|
||||
, flake-utils
|
||||
, ...
|
||||
}:
|
||||
let
|
||||
headscaleVersion =
|
||||
if (self ? shortRev)
|
||||
then self.shortRev
|
||||
else "dev";
|
||||
in
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
flake-utils,
|
||||
...
|
||||
}: let
|
||||
headscaleVersion =
|
||||
if (self ? shortRev)
|
||||
then self.shortRev
|
||||
else "dev";
|
||||
in
|
||||
{
|
||||
overlay = _: prev:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
in
|
||||
rec {
|
||||
headscale = pkgs.buildGo120Module rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
overlay = _: prev: let
|
||||
pkgs = nixpkgs.legacyPackages.${prev.system};
|
||||
in rec {
|
||||
headscale = pkgs.buildGo122Module rec {
|
||||
pname = "headscale";
|
||||
version = headscaleVersion;
|
||||
src = pkgs.lib.cleanSource self;
|
||||
|
||||
tags = [ "ts2019" ];
|
||||
# Only run unit tests when testing a build
|
||||
checkFlags = ["-short"];
|
||||
|
||||
# Only run unit tests when testing a build
|
||||
checkFlags = [ "-short" ];
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorHash = "sha256-HGu/OCtjzPeBki5FSL6v1XivCJ30eqj9rL0x7ZVv1TM=";
|
||||
|
||||
# When updating go.mod or go.sum, a new sha will need to be calculated,
|
||||
# update this if you have a mismatch after doing a change to thos files.
|
||||
vendorSha256 = "sha256-+JxS4Q6rTpdBwms2nkVDY/Kluv2qu2T0BaOIjfeX85M=";
|
||||
subPackages = ["cmd/headscale"];
|
||||
|
||||
ldflags = [ "-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}" ];
|
||||
};
|
||||
|
||||
golines = pkgs.buildGoModule rec {
|
||||
pname = "golines";
|
||||
version = "0.11.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "segmentio";
|
||||
repo = "golines";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-2K9KAg8iSubiTbujyFGN3yggrL+EDyeUCs9OOta/19A=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-rxYuzn4ezAxaeDhxd8qdOzt+CKYIh03A9zKNdzILq18=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
};
|
||||
|
||||
golangci-lint = prev.golangci-lint.override {
|
||||
# Override https://github.com/NixOS/nixpkgs/pull/166801 which changed this
|
||||
# to buildGo118Module because it does not build on Darwin.
|
||||
inherit (prev) buildGoModule;
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = pkgs.buildGoModule rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.14.0";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-lnNdsDCpeSHtl2lC1IhUw11t3cnGF+37qSM7HDvKLls=";
|
||||
};
|
||||
|
||||
vendorSha256 = "sha256-dGdnDuRbwg8fU7uB5GaHEWa/zI3w06onqjturvooJQA=";
|
||||
|
||||
nativeBuildInputs = [ pkgs.installShellFiles ];
|
||||
|
||||
subPackages = [ "protoc-gen-grpc-gateway" "protoc-gen-openapiv2" ];
|
||||
};
|
||||
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
|
||||
};
|
||||
|
||||
protoc-gen-grpc-gateway = pkgs.buildGoModule rec {
|
||||
pname = "grpc-gateway";
|
||||
version = "2.19.1";
|
||||
|
||||
src = pkgs.fetchFromGitHub {
|
||||
owner = "grpc-ecosystem";
|
||||
repo = "grpc-gateway";
|
||||
rev = "v${version}";
|
||||
sha256 = "sha256-CdGQpQfOSimeio8v1lZ7xzE/oAS2qFyu+uN+H9i7vpo=";
|
||||
};
|
||||
|
||||
vendorHash = "sha256-no7kZGpf/VOuceC3J+izGFQp5aMS3b+Rn+x4BFZ2zgs=";
|
||||
|
||||
nativeBuildInputs = [pkgs.installShellFiles];
|
||||
|
||||
subPackages = ["protoc-gen-grpc-gateway" "protoc-gen-openapiv2"];
|
||||
};
|
||||
};
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem
|
||||
(system:
|
||||
let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [ self.overlay ];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [ git go_1_20 gnumake ];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golines
|
||||
nodePackages.prettier
|
||||
goreleaser
|
||||
nfpm
|
||||
gotestsum
|
||||
(system: let
|
||||
pkgs = import nixpkgs {
|
||||
overlays = [self.overlay];
|
||||
inherit system;
|
||||
};
|
||||
buildDeps = with pkgs; [git go_1_22 gnumake];
|
||||
devDeps = with pkgs;
|
||||
buildDeps
|
||||
++ [
|
||||
golangci-lint
|
||||
golines
|
||||
nodePackages.prettier
|
||||
goreleaser
|
||||
nfpm
|
||||
gotestsum
|
||||
gotests
|
||||
ksh
|
||||
ko
|
||||
yq-go
|
||||
ripgrep
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
# 'dot' is needed for pprof graphs
|
||||
# go tool pprof -http=: <source>
|
||||
graphviz
|
||||
|
||||
# Protobuf dependencies
|
||||
protobuf
|
||||
protoc-gen-go
|
||||
protoc-gen-go-grpc
|
||||
protoc-gen-grpc-gateway
|
||||
buf
|
||||
clang-tools # clang-format
|
||||
];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [pkgs.headscale];
|
||||
config.Entrypoint = [(pkgs.headscale + "/bin/headscale")];
|
||||
};
|
||||
in rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell {
|
||||
buildInputs =
|
||||
devDeps
|
||||
++ [
|
||||
(pkgs.writeShellScriptBin
|
||||
"nix-vendor-sri"
|
||||
''
|
||||
set -eu
|
||||
|
||||
OUT=$(mktemp -d -t nar-hash-XXXXXX)
|
||||
rm -rf "$OUT"
|
||||
|
||||
go mod vendor -o "$OUT"
|
||||
go run tailscale.com/cmd/nardump --sri "$OUT"
|
||||
rm -rf "$OUT"
|
||||
'')
|
||||
|
||||
(pkgs.writeShellScriptBin
|
||||
"go-mod-update-all"
|
||||
''
|
||||
cat go.mod | ${pkgs.silver-searcher}/bin/ag "\t" | ${pkgs.silver-searcher}/bin/ag -v indirect | ${pkgs.gawk}/bin/awk '{print $1}' | ${pkgs.findutils}/bin/xargs go get -u
|
||||
go mod tidy
|
||||
'')
|
||||
];
|
||||
|
||||
# Add entry to build a docker image with headscale
|
||||
# caveat: only works on Linux
|
||||
#
|
||||
# Usage:
|
||||
# nix build .#headscale-docker
|
||||
# docker load < result
|
||||
headscale-docker = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "headscale";
|
||||
tag = headscaleVersion;
|
||||
contents = [ pkgs.headscale ];
|
||||
config.Entrypoint = [ (pkgs.headscale + "/bin/headscale") ];
|
||||
};
|
||||
in
|
||||
rec {
|
||||
# `nix develop`
|
||||
devShell = pkgs.mkShell {
|
||||
buildInputs = devDeps;
|
||||
shellHook = ''
|
||||
export PATH="$PWD/result/bin:$PATH"
|
||||
'';
|
||||
};
|
||||
|
||||
shellHook = ''
|
||||
export GOFLAGS=-tags="ts2019"
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
apps.default = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format =
|
||||
pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
nodePackages.prettier
|
||||
golines
|
||||
clang-tools
|
||||
];
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
|
||||
'';
|
||||
};
|
||||
|
||||
# `nix build`
|
||||
packages = with pkgs; {
|
||||
inherit headscale;
|
||||
inherit headscale-docker;
|
||||
};
|
||||
defaultPackage = pkgs.headscale;
|
||||
|
||||
# `nix run`
|
||||
apps.headscale = flake-utils.lib.mkApp {
|
||||
drv = packages.headscale;
|
||||
};
|
||||
apps.default = apps.headscale;
|
||||
|
||||
checks = {
|
||||
format =
|
||||
pkgs.runCommand "check-format"
|
||||
{
|
||||
buildInputs = with pkgs; [
|
||||
gnumake
|
||||
nixpkgs-fmt
|
||||
golangci-lint
|
||||
nodePackages.prettier
|
||||
golines
|
||||
clang-tools
|
||||
];
|
||||
} ''
|
||||
${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt ${./.}
|
||||
${pkgs.golangci-lint}/bin/golangci-lint run --fix --timeout 10m
|
||||
${pkgs.nodePackages.prettier}/bin/prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
|
||||
${pkgs.golines}/bin/golines --max-len=88 --base-formatter=gofumpt -w ${./.}
|
||||
${pkgs.clang-tools}/bin/clang-format -style="{BasedOnStyle: Google, IndentWidth: 4, AlignConsecutiveDeclarations: true, AlignConsecutiveAssignments: true, ColumnLimit: 0}" -i ${./.}
|
||||
'';
|
||||
};
|
||||
});
|
||||
};
|
||||
});
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc-gen-go v1.33.0
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/apikey.proto
|
||||
|
||||
@@ -364,6 +364,91 @@ func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
|
||||
return nil
|
||||
}
|
||||
|
||||
type DeleteApiKeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyRequest) Reset() {
|
||||
*x = DeleteApiKeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteApiKeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteApiKeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteApiKeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteApiKeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyRequest) GetPrefix() string {
|
||||
if x != nil {
|
||||
return x.Prefix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type DeleteApiKeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyResponse) Reset() {
|
||||
*x = DeleteApiKeyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteApiKeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteApiKeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteApiKeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_headscale_v1_apikey_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteApiKeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteApiKeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_headscale_v1_apikey_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
var File_headscale_v1_apikey_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_headscale_v1_apikey_proto_rawDesc = []byte{
|
||||
@@ -404,10 +489,14 @@ var file_headscale_v1_apikey_proto_rawDesc = []byte{
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79,
|
||||
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x61,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
|
||||
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76,
|
||||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x2d, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
|
||||
0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70,
|
||||
0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29, 0x5a,
|
||||
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
|
||||
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
|
||||
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -422,7 +511,7 @@ func file_headscale_v1_apikey_proto_rawDescGZIP() []byte {
|
||||
return file_headscale_v1_apikey_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_headscale_v1_apikey_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
|
||||
var file_headscale_v1_apikey_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_headscale_v1_apikey_proto_goTypes = []interface{}{
|
||||
(*ApiKey)(nil), // 0: headscale.v1.ApiKey
|
||||
(*CreateApiKeyRequest)(nil), // 1: headscale.v1.CreateApiKeyRequest
|
||||
@@ -431,13 +520,15 @@ var file_headscale_v1_apikey_proto_goTypes = []interface{}{
|
||||
(*ExpireApiKeyResponse)(nil), // 4: headscale.v1.ExpireApiKeyResponse
|
||||
(*ListApiKeysRequest)(nil), // 5: headscale.v1.ListApiKeysRequest
|
||||
(*ListApiKeysResponse)(nil), // 6: headscale.v1.ListApiKeysResponse
|
||||
(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
|
||||
(*DeleteApiKeyRequest)(nil), // 7: headscale.v1.DeleteApiKeyRequest
|
||||
(*DeleteApiKeyResponse)(nil), // 8: headscale.v1.DeleteApiKeyResponse
|
||||
(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
|
||||
}
|
||||
var file_headscale_v1_apikey_proto_depIdxs = []int32{
|
||||
7, // 0: headscale.v1.ApiKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
7, // 1: headscale.v1.ApiKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
7, // 2: headscale.v1.ApiKey.last_seen:type_name -> google.protobuf.Timestamp
|
||||
7, // 3: headscale.v1.CreateApiKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
9, // 0: headscale.v1.ApiKey.expiration:type_name -> google.protobuf.Timestamp
|
||||
9, // 1: headscale.v1.ApiKey.created_at:type_name -> google.protobuf.Timestamp
|
||||
9, // 2: headscale.v1.ApiKey.last_seen:type_name -> google.protobuf.Timestamp
|
||||
9, // 3: headscale.v1.CreateApiKeyRequest.expiration:type_name -> google.protobuf.Timestamp
|
||||
0, // 4: headscale.v1.ListApiKeysResponse.api_keys:type_name -> headscale.v1.ApiKey
|
||||
5, // [5:5] is the sub-list for method output_type
|
||||
5, // [5:5] is the sub-list for method input_type
|
||||
@@ -536,6 +627,30 @@ func file_headscale_v1_apikey_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_apikey_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteApiKeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_headscale_v1_apikey_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteApiKeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
@@ -543,7 +658,7 @@ func file_headscale_v1_apikey_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_headscale_v1_apikey_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 7,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc-gen-go v1.33.0
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/device.proto
|
||||
|
||||
|
@@ -1,6 +1,6 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc-gen-go v1.33.0
|
||||
// protoc (unknown)
|
||||
// source: headscale/v1/headscale.proto
|
||||
|
||||
@@ -31,261 +31,272 @@ var file_headscale_v1_headscale_proto_rawDesc = []byte{
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x1a, 0x1a, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x32, 0x8d, 0x18, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x07, 0x47, 0x65, 0x74,
|
||||
0x55, 0x73, 0x65, 0x72, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x68,
|
||||
0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61,
|
||||
0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x3a, 0x01, 0x2a, 0x22, 0x0c, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x82, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65,
|
||||
0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73,
|
||||
0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x2b, 0x22, 0x29, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72,
|
||||
0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6c, 0x0a,
|
||||
0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
|
||||
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x2a, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72,
|
||||
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12,
|
||||
0x80, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x32, 0x80, 0x19, 0x0a, 0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65,
|
||||
0x72, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
|
||||
0x65, 0x74, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x68, 0x0a, 0x0a, 0x43,
|
||||
0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55,
|
||||
0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x11, 0x3a, 0x01, 0x2a, 0x22, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x82, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x22,
|
||||
0x29, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6f,
|
||||
0x6c, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f,
|
||||
0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6c, 0x0a, 0x0a, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73,
|
||||
0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55,
|
||||
0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x15, 0x2a, 0x13, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65,
|
||||
0x72, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a,
|
||||
0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
|
||||
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74,
|
||||
0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a, 0x22, 0x19,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b,
|
||||
0x65, 0x79, 0x12, 0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65,
|
||||
0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78,
|
||||
0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01,
|
||||
0x2a, 0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75,
|
||||
0x74, 0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f,
|
||||
0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12,
|
||||
0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72,
|
||||
0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x89, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x62,
|
||||
0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12,
|
||||
0x27, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44,
|
||||
0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c, 0x69, 0x73,
|
||||
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75,
|
||||
0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x7d, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72,
|
||||
0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x6d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x12, 0x75, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f,
|
||||
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x74, 0x0a, 0x07, 0x53,
|
||||
0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
|
||||
0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x3a, 0x01, 0x2a, 0x22, 0x21,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f,
|
||||
0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67,
|
||||
0x73, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73,
|
||||
0x74, 0x65, 0x72, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69,
|
||||
0x73, 0x74, 0x65, 0x72, 0x12, 0x7e, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x2a, 0x1c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0d, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d,
|
||||
0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4d, 0x61, 0x63, 0x68,
|
||||
0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x22, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x90, 0x01, 0x0a,
|
||||
0x0d, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x22,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x22,
|
||||
0x2e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65,
|
||||
0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65,
|
||||
0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
|
||||
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a,
|
||||
0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f,
|
||||
0x6e, 0x6f, 0x64, 0x65, 0x12, 0x66, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x12,
|
||||
0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
|
||||
0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f,
|
||||
0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x6e, 0x0a, 0x07,
|
||||
0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22,
|
||||
0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x12, 0x74, 0x0a, 0x0c,
|
||||
0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x67, 0x69,
|
||||
0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52,
|
||||
0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x15, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74,
|
||||
0x65, 0x72, 0x12, 0x6f, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65,
|
||||
0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x12, 0x76, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64,
|
||||
0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1d, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65,
|
||||
0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x0a,
|
||||
0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65,
|
||||
0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12,
|
||||
0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x12,
|
||||
0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12,
|
||||
0x7d, 0x0a, 0x0b, 0x4d, 0x6f, 0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x12, 0x20,
|
||||
0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x12, 0x6e, 0x0a, 0x08, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12,
|
||||
0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d,
|
||||
0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f,
|
||||
0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x4d, 0x6f, 0x76, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61,
|
||||
0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x64,
|
||||
0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22,
|
||||
0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
|
||||
0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f,
|
||||
0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
|
||||
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69,
|
||||
0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63,
|
||||
0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x25, 0x12, 0x23, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x61, 0x63, 0x68, 0x69,
|
||||
0x6e, 0x65, 0x2f, 0x7b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
|
||||
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
|
||||
0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a,
|
||||
0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
|
||||
0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
|
||||
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22,
|
||||
0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12,
|
||||
0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45,
|
||||
0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01,
|
||||
0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65,
|
||||
0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x6b, 0x65, 0x79, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
|
||||
0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x75,
|
||||
0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c,
|
||||
0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e,
|
||||
0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63,
|
||||
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x22, 0x18, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x66,
|
||||
0x69, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75,
|
||||
0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
|
||||
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x7c, 0x0a, 0x0b,
|
||||
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c,
|
||||
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x61,
|
||||
0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f,
|
||||
0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0c, 0x44,
|
||||
0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x68, 0x65,
|
||||
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62,
|
||||
0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69,
|
||||
0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x21, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x7f, 0x0a,
|
||||
0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x22,
|
||||
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65,
|
||||
0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
|
||||
0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12,
|
||||
0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e,
|
||||
0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x75,
|
||||
0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x20, 0x2e,
|
||||
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
|
||||
0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x61, 0x70, 0x69,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72, 0x6f, 0x75, 0x74,
|
||||
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
|
||||
0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
|
||||
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
|
||||
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70,
|
||||
0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x69, 0x72,
|
||||
0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
|
||||
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69,
|
||||
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61,
|
||||
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
|
||||
0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x12,
|
||||
0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c,
|
||||
0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
|
||||
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61,
|
||||
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x76, 0x0a, 0x0c,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68,
|
||||
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x2a, 0x17, 0x2f, 0x61, 0x70,
|
||||
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x7b, 0x70, 0x72, 0x65,
|
||||
0x66, 0x69, 0x78, 0x7d, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||
0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64,
|
||||
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var file_headscale_v1_headscale_proto_goTypes = []interface{}{
(*GetUserRequest)(nil), // 0: headscale.v1.GetUserRequest
(*CreateUserRequest)(nil), // 1: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 2: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 3: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 4: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 5: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 6: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateMachineRequest)(nil), // 8: headscale.v1.DebugCreateMachineRequest
(*GetMachineRequest)(nil), // 9: headscale.v1.GetMachineRequest
(*SetTagsRequest)(nil), // 10: headscale.v1.SetTagsRequest
(*RegisterMachineRequest)(nil), // 11: headscale.v1.RegisterMachineRequest
(*DeleteMachineRequest)(nil), // 12: headscale.v1.DeleteMachineRequest
(*ExpireMachineRequest)(nil), // 13: headscale.v1.ExpireMachineRequest
(*RenameMachineRequest)(nil), // 14: headscale.v1.RenameMachineRequest
(*ListMachinesRequest)(nil), // 15: headscale.v1.ListMachinesRequest
(*MoveMachineRequest)(nil), // 16: headscale.v1.MoveMachineRequest
(*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest
(*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest
(*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest
(*GetMachineRoutesRequest)(nil), // 20: headscale.v1.GetMachineRoutesRequest
(*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest
(*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest
(*GetUserResponse)(nil), // 25: headscale.v1.GetUserResponse
(*CreateUserResponse)(nil), // 26: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 27: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 28: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 29: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 30: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 31: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 32: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateMachineResponse)(nil), // 33: headscale.v1.DebugCreateMachineResponse
(*GetMachineResponse)(nil), // 34: headscale.v1.GetMachineResponse
(*SetTagsResponse)(nil), // 35: headscale.v1.SetTagsResponse
(*RegisterMachineResponse)(nil), // 36: headscale.v1.RegisterMachineResponse
(*DeleteMachineResponse)(nil), // 37: headscale.v1.DeleteMachineResponse
(*ExpireMachineResponse)(nil), // 38: headscale.v1.ExpireMachineResponse
(*RenameMachineResponse)(nil), // 39: headscale.v1.RenameMachineResponse
(*ListMachinesResponse)(nil), // 40: headscale.v1.ListMachinesResponse
(*MoveMachineResponse)(nil), // 41: headscale.v1.MoveMachineResponse
(*GetRoutesResponse)(nil), // 42: headscale.v1.GetRoutesResponse
(*EnableRouteResponse)(nil), // 43: headscale.v1.EnableRouteResponse
(*DisableRouteResponse)(nil), // 44: headscale.v1.DisableRouteResponse
(*GetMachineRoutesResponse)(nil), // 45: headscale.v1.GetMachineRoutesResponse
(*DeleteRouteResponse)(nil), // 46: headscale.v1.DeleteRouteResponse
(*CreateApiKeyResponse)(nil), // 47: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 48: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 49: headscale.v1.ListApiKeysResponse
(*GetUserRequest)(nil), // 0: headscale.v1.GetUserRequest
(*CreateUserRequest)(nil), // 1: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 2: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 3: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 4: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 5: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 6: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 7: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 8: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 9: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 10: headscale.v1.SetTagsRequest
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
(*GetRoutesRequest)(nil), // 18: headscale.v1.GetRoutesRequest
(*EnableRouteRequest)(nil), // 19: headscale.v1.EnableRouteRequest
(*DisableRouteRequest)(nil), // 20: headscale.v1.DisableRouteRequest
(*GetNodeRoutesRequest)(nil), // 21: headscale.v1.GetNodeRoutesRequest
(*DeleteRouteRequest)(nil), // 22: headscale.v1.DeleteRouteRequest
(*CreateApiKeyRequest)(nil), // 23: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 24: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 25: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 26: headscale.v1.DeleteApiKeyRequest
(*GetUserResponse)(nil), // 27: headscale.v1.GetUserResponse
(*CreateUserResponse)(nil), // 28: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 29: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 30: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 31: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 32: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 33: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 34: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 43: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 44: headscale.v1.BackfillNodeIPsResponse
(*GetRoutesResponse)(nil), // 45: headscale.v1.GetRoutesResponse
(*EnableRouteResponse)(nil), // 46: headscale.v1.EnableRouteResponse
(*DisableRouteResponse)(nil), // 47: headscale.v1.DisableRouteResponse
(*GetNodeRoutesResponse)(nil), // 48: headscale.v1.GetNodeRoutesResponse
(*DeleteRouteResponse)(nil), // 49: headscale.v1.DeleteRouteResponse
(*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
0, // 0: headscale.v1.HeadscaleService.GetUser:input_type -> headscale.v1.GetUserRequest
@@ -296,50 +307,54 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
5, // 5: headscale.v1.HeadscaleService.CreatePreAuthKey:input_type -> headscale.v1.CreatePreAuthKeyRequest
6, // 6: headscale.v1.HeadscaleService.ExpirePreAuthKey:input_type -> headscale.v1.ExpirePreAuthKeyRequest
7, // 7: headscale.v1.HeadscaleService.ListPreAuthKeys:input_type -> headscale.v1.ListPreAuthKeysRequest
8, // 8: headscale.v1.HeadscaleService.DebugCreateMachine:input_type -> headscale.v1.DebugCreateMachineRequest
9, // 9: headscale.v1.HeadscaleService.GetMachine:input_type -> headscale.v1.GetMachineRequest
8, // 8: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
9, // 9: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
10, // 10: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
11, // 11: headscale.v1.HeadscaleService.RegisterMachine:input_type -> headscale.v1.RegisterMachineRequest
12, // 12: headscale.v1.HeadscaleService.DeleteMachine:input_type -> headscale.v1.DeleteMachineRequest
13, // 13: headscale.v1.HeadscaleService.ExpireMachine:input_type -> headscale.v1.ExpireMachineRequest
14, // 14: headscale.v1.HeadscaleService.RenameMachine:input_type -> headscale.v1.RenameMachineRequest
15, // 15: headscale.v1.HeadscaleService.ListMachines:input_type -> headscale.v1.ListMachinesRequest
16, // 16: headscale.v1.HeadscaleService.MoveMachine:input_type -> headscale.v1.MoveMachineRequest
17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
20, // 20: headscale.v1.HeadscaleService.GetMachineRoutes:input_type -> headscale.v1.GetMachineRoutesRequest
21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
25, // 25: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse
26, // 26: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
27, // 27: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
28, // 28: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
29, // 29: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
30, // 30: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
31, // 31: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
32, // 32: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
33, // 33: headscale.v1.HeadscaleService.DebugCreateMachine:output_type -> headscale.v1.DebugCreateMachineResponse
34, // 34: headscale.v1.HeadscaleService.GetMachine:output_type -> headscale.v1.GetMachineResponse
35, // 35: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
36, // 36: headscale.v1.HeadscaleService.RegisterMachine:output_type -> headscale.v1.RegisterMachineResponse
37, // 37: headscale.v1.HeadscaleService.DeleteMachine:output_type -> headscale.v1.DeleteMachineResponse
38, // 38: headscale.v1.HeadscaleService.ExpireMachine:output_type -> headscale.v1.ExpireMachineResponse
39, // 39: headscale.v1.HeadscaleService.RenameMachine:output_type -> headscale.v1.RenameMachineResponse
40, // 40: headscale.v1.HeadscaleService.ListMachines:output_type -> headscale.v1.ListMachinesResponse
41, // 41: headscale.v1.HeadscaleService.MoveMachine:output_type -> headscale.v1.MoveMachineResponse
42, // 42: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
43, // 43: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
44, // 44: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
45, // 45: headscale.v1.HeadscaleService.GetMachineRoutes:output_type -> headscale.v1.GetMachineRoutesResponse
46, // 46: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
47, // 47: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
48, // 48: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
49, // 49: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
25, // [25:50] is the sub-list for method output_type
0, // [0:25] is the sub-list for method input_type
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
18, // 18: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
19, // 19: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
20, // 20: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
21, // 21: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest
22, // 22: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
23, // 23: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
24, // 24: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
25, // 25: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
26, // 26: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
27, // 27: headscale.v1.HeadscaleService.GetUser:output_type -> headscale.v1.GetUserResponse
28, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
29, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
30, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
31, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
32, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
33, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
34, // 34: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
35, // 35: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
36, // 36: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
37, // 37: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
38, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
39, // 39: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
40, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
41, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
42, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
43, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
44, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
45, // 45: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
46, // 46: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
47, // 47: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
48, // 48: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse
49, // 49: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
50, // 50: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
51, // 51: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
52, // 52: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
53, // 53: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
27, // [27:54] is the sub-list for method output_type
0, // [0:27] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -352,7 +367,7 @@ func file_headscale_v1_headscale_proto_init() {
}
file_headscale_v1_user_proto_init()
file_headscale_v1_preauthkey_proto_init()
file_headscale_v1_machine_proto_init()
file_headscale_v1_node_proto_init()
file_headscale_v1_routes_proto_init()
file_headscale_v1_apikey_proto_init()
type x struct{}
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc-gen-go-grpc v1.3.0
// - protoc (unknown)
// source: headscale/v1/headscale.proto

@@ -18,6 +18,36 @@ import (
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

const (
HeadscaleService_GetUser_FullMethodName = "/headscale.v1.HeadscaleService/GetUser"
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes"
HeadscaleService_EnableRoute_FullMethodName = "/headscale.v1.HeadscaleService/EnableRoute"
HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute"
HeadscaleService_GetNodeRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetNodeRoutes"
HeadscaleService_DeleteRoute_FullMethodName = "/headscale.v1.HeadscaleService/DeleteRoute"
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
)

// HeadscaleServiceClient is the client API for HeadscaleService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
@@ -32,26 +62,28 @@ type HeadscaleServiceClient interface {
CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error)
ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error)
ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error)
// --- Machine start ---
DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error)
GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error)
// --- Node start ---
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)
RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error)
DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error)
ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error)
RenameMachine(ctx context.Context, in *RenameMachineRequest, opts ...grpc.CallOption) (*RenameMachineResponse, error)
ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error)
MoveMachine(ctx context.Context, in *MoveMachineRequest, opts ...grpc.CallOption) (*MoveMachineResponse, error)
RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error)
DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error)
ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)
RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error)
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error)
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
// --- Route start ---
GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error)
EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error)
DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error)
GetMachineRoutes(ctx context.Context, in *GetMachineRoutesRequest, opts ...grpc.CallOption) (*GetMachineRoutesResponse, error)
GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error)
DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error)
// --- ApiKeys start ---
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error)
DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error)
}

type headscaleServiceClient struct {
@@ -64,7 +96,7 @@ func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClie
|
||||
|
||||
func (c *headscaleServiceClient) GetUser(ctx context.Context, in *GetUserRequest, opts ...grpc.CallOption) (*GetUserResponse, error) {
|
||||
out := new(GetUserResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetUser", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -73,7 +105,7 @@ func (c *headscaleServiceClient) GetUser(ctx context.Context, in *GetUserRequest
|
||||
|
||||
func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) {
|
||||
out := new(CreateUserResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreateUser", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -82,7 +114,7 @@ func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserR
|
||||
|
||||
func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error) {
|
||||
out := new(RenameUserResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RenameUser", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -91,7 +123,7 @@ func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserR
|
||||
|
||||
func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) {
|
||||
out := new(DeleteUserResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteUser", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -100,7 +132,7 @@ func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserR
|
||||
|
||||
func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) {
|
||||
out := new(ListUsersResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListUsers", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -109,7 +141,7 @@ func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersReq
|
||||
|
||||
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
|
||||
out := new(CreatePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreatePreAuthKey", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -118,7 +150,7 @@ func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *Creat
|
||||
|
||||
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
|
||||
out := new(ExpirePreAuthKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpirePreAuthKey", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -127,25 +159,25 @@ func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *Expir
|
||||
|
||||
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
|
||||
out := new(ListPreAuthKeysResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListPreAuthKeys", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DebugCreateMachine(ctx context.Context, in *DebugCreateMachineRequest, opts ...grpc.CallOption) (*DebugCreateMachineResponse, error) {
|
||||
out := new(DebugCreateMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DebugCreateMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error) {
|
||||
out := new(DebugCreateNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachine(ctx context.Context, in *GetMachineRequest, opts ...grpc.CallOption) (*GetMachineResponse, error) {
|
||||
out := new(GetMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
|
||||
out := new(GetNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -154,61 +186,70 @@ func (c *headscaleServiceClient) GetMachine(ctx context.Context, in *GetMachineR
|
||||
|
||||
func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {
|
||||
out := new(SetTagsResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/SetTags", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RegisterMachine(ctx context.Context, in *RegisterMachineRequest, opts ...grpc.CallOption) (*RegisterMachineResponse, error) {
|
||||
out := new(RegisterMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RegisterMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error) {
|
||||
out := new(RegisterNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteMachine(ctx context.Context, in *DeleteMachineRequest, opts ...grpc.CallOption) (*DeleteMachineResponse, error) {
|
||||
out := new(DeleteMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error) {
|
||||
out := new(DeleteNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ExpireMachine(ctx context.Context, in *ExpireMachineRequest, opts ...grpc.CallOption) (*ExpireMachineResponse, error) {
|
||||
out := new(ExpireMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpireMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error) {
|
||||
out := new(ExpireNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) RenameMachine(ctx context.Context, in *RenameMachineRequest, opts ...grpc.CallOption) (*RenameMachineResponse, error) {
|
||||
out := new(RenameMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/RenameMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) {
|
||||
out := new(RenameNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) ListMachines(ctx context.Context, in *ListMachinesRequest, opts ...grpc.CallOption) (*ListMachinesResponse, error) {
|
||||
out := new(ListMachinesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListMachines", in, out, opts...)
|
||||
func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) {
|
||||
out := new(ListNodesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) MoveMachine(ctx context.Context, in *MoveMachineRequest, opts ...grpc.CallOption) (*MoveMachineResponse, error) {
|
||||
out := new(MoveMachineResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/MoveMachine", in, out, opts...)
|
||||
func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) {
|
||||
out := new(MoveNodeResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}

func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
out := new(BackfillNodeIPsResponse)
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -217,7 +258,7 @@ func (c *headscaleServiceClient) MoveMachine(ctx context.Context, in *MoveMachin
|
||||
|
||||
func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) {
|
||||
out := new(GetRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetRoutes", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -226,7 +267,7 @@ func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesReq
|
||||
|
||||
func (c *headscaleServiceClient) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) {
|
||||
out := new(EnableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/EnableRoute", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_EnableRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -235,16 +276,16 @@ func (c *headscaleServiceClient) EnableRoute(ctx context.Context, in *EnableRout
|
||||
|
||||
func (c *headscaleServiceClient) DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error) {
|
||||
out := new(DisableRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DisableRoute", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DisableRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) GetMachineRoutes(ctx context.Context, in *GetMachineRoutesRequest, opts ...grpc.CallOption) (*GetMachineRoutesResponse, error) {
|
||||
out := new(GetMachineRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/GetMachineRoutes", in, out, opts...)
|
||||
func (c *headscaleServiceClient) GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error) {
|
||||
out := new(GetNodeRoutesResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_GetNodeRoutes_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -253,7 +294,7 @@ func (c *headscaleServiceClient) GetMachineRoutes(ctx context.Context, in *GetMa
|
||||
|
||||
func (c *headscaleServiceClient) DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error) {
|
||||
out := new(DeleteRouteResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/DeleteRoute", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteRoute_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -262,7 +303,7 @@ func (c *headscaleServiceClient) DeleteRoute(ctx context.Context, in *DeleteRout
|
||||
|
||||
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
|
||||
out := new(CreateApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/CreateApiKey", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -271,7 +312,7 @@ func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApi
|
||||
|
||||
func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {
|
||||
out := new(ExpireApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ExpireApiKey", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -280,7 +321,16 @@ func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApi
|
||||
|
||||
func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {
|
||||
out := new(ListApiKeysResponse)
|
||||
err := c.cc.Invoke(ctx, "/headscale.v1.HeadscaleService/ListApiKeys", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) {
|
||||
out := new(DeleteApiKeyResponse)
|
||||
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -301,26 +351,28 @@ type HeadscaleServiceServer interface {
|
||||
CreatePreAuthKey(context.Context, *CreatePreAuthKeyRequest) (*CreatePreAuthKeyResponse, error)
|
||||
ExpirePreAuthKey(context.Context, *ExpirePreAuthKeyRequest) (*ExpirePreAuthKeyResponse, error)
|
||||
ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error)
|
||||
// --- Machine start ---
|
||||
DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error)
|
||||
GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error)
|
||||
// --- Node start ---
|
||||
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
|
||||
GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
|
||||
SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)
|
||||
RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error)
|
||||
DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error)
|
||||
ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error)
|
||||
RenameMachine(context.Context, *RenameMachineRequest) (*RenameMachineResponse, error)
|
||||
ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error)
|
||||
MoveMachine(context.Context, *MoveMachineRequest) (*MoveMachineResponse, error)
|
||||
RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error)
|
||||
DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error)
|
||||
ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)
|
||||
RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error)
|
||||
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
|
||||
MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error)
|
||||
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
|
||||
// --- Route start ---
|
||||
GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error)
|
||||
EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error)
|
||||
DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error)
|
||||
GetMachineRoutes(context.Context, *GetMachineRoutesRequest) (*GetMachineRoutesResponse, error)
|
||||
GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error)
|
||||
DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error)
|
||||
// --- ApiKeys start ---
|
||||
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
|
||||
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
|
||||
ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error)
|
||||
DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error)
|
||||
mustEmbedUnimplementedHeadscaleServiceServer()
|
||||
}
|
||||
|
||||
@@ -352,32 +404,35 @@ func (UnimplementedHeadscaleServiceServer) ExpirePreAuthKey(context.Context, *Ex
|
||||
func (UnimplementedHeadscaleServiceServer) ListPreAuthKeys(context.Context, *ListPreAuthKeysRequest) (*ListPreAuthKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListPreAuthKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DebugCreateMachine(context.Context, *DebugCreateMachineRequest) (*DebugCreateMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DebugCreateMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DebugCreateNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachine(context.Context, *GetMachineRequest) (*GetMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterMachine(context.Context, *RegisterMachineRequest) (*RegisterMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteMachine(context.Context, *DeleteMachineRequest) (*DeleteMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireMachine(context.Context, *ExpireMachineRequest) (*ExpireMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ExpireNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) RenameMachine(context.Context, *RenameMachineRequest) (*RenameMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) RenameNode(context.Context, *RenameNodeRequest) (*RenameNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RenameNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) ListMachines(context.Context, *ListMachinesRequest) (*ListMachinesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListMachines not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListNodes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) MoveMachine(context.Context, *MoveMachineRequest) (*MoveMachineResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method MoveMachine not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method MoveNode not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented")
|
||||
@@ -388,8 +443,8 @@ func (UnimplementedHeadscaleServiceServer) EnableRoute(context.Context, *EnableR
|
||||
func (UnimplementedHeadscaleServiceServer) DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DisableRoute not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) GetMachineRoutes(context.Context, *GetMachineRoutesRequest) (*GetMachineRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetMachineRoutes not implemented")
|
||||
func (UnimplementedHeadscaleServiceServer) GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetNodeRoutes not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteRoute not implemented")
|
||||
@@ -403,6 +458,9 @@ func (UnimplementedHeadscaleServiceServer) ExpireApiKey(context.Context, *Expire
|
||||
func (UnimplementedHeadscaleServiceServer) ListApiKeys(context.Context, *ListApiKeysRequest) (*ListApiKeysResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ListApiKeys not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) DeleteApiKey(context.Context, *DeleteApiKeyRequest) (*DeleteApiKeyResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteApiKey not implemented")
|
||||
}
|
||||
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
|
||||
|
||||
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
@@ -426,7 +484,7 @@ func _HeadscaleService_GetUser_Handler(srv interface{}, ctx context.Context, dec
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetUser",
|
||||
FullMethod: HeadscaleService_GetUser_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetUser(ctx, req.(*GetUserRequest))
|
||||
@@ -444,7 +502,7 @@ func _HeadscaleService_CreateUser_Handler(srv interface{}, ctx context.Context,
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreateUser",
|
||||
FullMethod: HeadscaleService_CreateUser_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreateUser(ctx, req.(*CreateUserRequest))
|
||||
@@ -462,7 +520,7 @@ func _HeadscaleService_RenameUser_Handler(srv interface{}, ctx context.Context,
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RenameUser",
|
||||
FullMethod: HeadscaleService_RenameUser_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RenameUser(ctx, req.(*RenameUserRequest))
|
||||
@@ -480,7 +538,7 @@ func _HeadscaleService_DeleteUser_Handler(srv interface{}, ctx context.Context,
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteUser",
|
||||
FullMethod: HeadscaleService_DeleteUser_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteUser(ctx, req.(*DeleteUserRequest))
|
||||
@@ -498,7 +556,7 @@ func _HeadscaleService_ListUsers_Handler(srv interface{}, ctx context.Context, d
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListUsers",
|
||||
FullMethod: HeadscaleService_ListUsers_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListUsers(ctx, req.(*ListUsersRequest))
|
||||
@@ -516,7 +574,7 @@ func _HeadscaleService_CreatePreAuthKey_Handler(srv interface{}, ctx context.Con
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreatePreAuthKey",
|
||||
FullMethod: HeadscaleService_CreatePreAuthKey_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreatePreAuthKey(ctx, req.(*CreatePreAuthKeyRequest))
|
||||
@@ -534,7 +592,7 @@ func _HeadscaleService_ExpirePreAuthKey_Handler(srv interface{}, ctx context.Con
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpirePreAuthKey",
|
||||
FullMethod: HeadscaleService_ExpirePreAuthKey_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpirePreAuthKey(ctx, req.(*ExpirePreAuthKeyRequest))
|
||||
@@ -552,7 +610,7 @@ func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Cont
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListPreAuthKeys",
|
||||
FullMethod: HeadscaleService_ListPreAuthKeys_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListPreAuthKeys(ctx, req.(*ListPreAuthKeysRequest))
|
||||
@@ -560,38 +618,38 @@ func _HeadscaleService_ListPreAuthKeys_Handler(srv interface{}, ctx context.Cont
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DebugCreateMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DebugCreateMachineRequest)
|
||||
func _HeadscaleService_DebugCreateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DebugCreateNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DebugCreateMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).DebugCreateNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DebugCreateMachine",
|
||||
FullMethod: HeadscaleService_DebugCreateNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DebugCreateMachine(ctx, req.(*DebugCreateMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).DebugCreateNode(ctx, req.(*DebugCreateNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRequest)
|
||||
func _HeadscaleService_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).GetNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachine",
|
||||
FullMethod: HeadscaleService_GetNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachine(ctx, req.(*GetMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).GetNode(ctx, req.(*GetNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -606,7 +664,7 @@ func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/SetTags",
|
||||
FullMethod: HeadscaleService_SetTags_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).SetTags(ctx, req.(*SetTagsRequest))
|
||||
@@ -614,110 +672,128 @@ func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RegisterMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterMachineRequest)
|
||||
func _HeadscaleService_RegisterNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RegisterNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).RegisterMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).RegisterNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RegisterMachine",
|
||||
FullMethod: HeadscaleService_RegisterNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RegisterMachine(ctx, req.(*RegisterMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).RegisterNode(ctx, req.(*RegisterNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_DeleteMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteMachineRequest)
|
||||
func _HeadscaleService_DeleteNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeleteNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).DeleteMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).DeleteNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteMachine",
|
||||
FullMethod: HeadscaleService_DeleteNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteMachine(ctx, req.(*DeleteMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).DeleteNode(ctx, req.(*DeleteNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ExpireMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpireMachineRequest)
|
||||
func _HeadscaleService_ExpireNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ExpireNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ExpireMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).ExpireNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpireMachine",
|
||||
FullMethod: HeadscaleService_ExpireNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpireMachine(ctx, req.(*ExpireMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).ExpireNode(ctx, req.(*ExpireNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_RenameMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RenameMachineRequest)
|
||||
func _HeadscaleService_RenameNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RenameNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).RenameMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).RenameNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/RenameMachine",
|
||||
FullMethod: HeadscaleService_RenameNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).RenameMachine(ctx, req.(*RenameMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).RenameNode(ctx, req.(*RenameNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_ListMachines_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListMachinesRequest)
|
||||
func _HeadscaleService_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListNodesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).ListMachines(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).ListNodes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListMachines",
|
||||
FullMethod: HeadscaleService_ListNodes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListMachines(ctx, req.(*ListMachinesRequest))
|
||||
return srv.(HeadscaleServiceServer).ListNodes(ctx, req.(*ListNodesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_MoveMachine_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(MoveMachineRequest)
|
||||
func _HeadscaleService_MoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(MoveNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).MoveMachine(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).MoveNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/MoveMachine",
|
||||
FullMethod: HeadscaleService_MoveNode_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).MoveMachine(ctx, req.(*MoveMachineRequest))
|
||||
return srv.(HeadscaleServiceServer).MoveNode(ctx, req.(*MoveNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||

func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BackfillNodeIPsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_BackfillNodeIPs_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).BackfillNodeIPs(ctx, req.(*BackfillNodeIPsRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -732,7 +808,7 @@ func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, d
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetRoutes",
|
||||
FullMethod: HeadscaleService_GetRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetRoutes(ctx, req.(*GetRoutesRequest))
|
||||
@@ -750,7 +826,7 @@ func _HeadscaleService_EnableRoute_Handler(srv interface{}, ctx context.Context,
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/EnableRoute",
|
||||
FullMethod: HeadscaleService_EnableRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).EnableRoute(ctx, req.(*EnableRouteRequest))
|
||||
@@ -768,7 +844,7 @@ func _HeadscaleService_DisableRoute_Handler(srv interface{}, ctx context.Context
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DisableRoute",
|
||||
FullMethod: HeadscaleService_DisableRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DisableRoute(ctx, req.(*DisableRouteRequest))
|
||||
@@ -776,20 +852,20 @@ func _HeadscaleService_DisableRoute_Handler(srv interface{}, ctx context.Context
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _HeadscaleService_GetMachineRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetMachineRoutesRequest)
|
||||
func _HeadscaleService_GetNodeRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetNodeRoutesRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoutes(ctx, in)
|
||||
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/GetMachineRoutes",
|
||||
FullMethod: HeadscaleService_GetNodeRoutes_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).GetMachineRoutes(ctx, req.(*GetMachineRoutesRequest))
|
||||
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, req.(*GetNodeRoutesRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -804,7 +880,7 @@ func _HeadscaleService_DeleteRoute_Handler(srv interface{}, ctx context.Context,
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/DeleteRoute",
|
||||
FullMethod: HeadscaleService_DeleteRoute_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, req.(*DeleteRouteRequest))
|
||||
@@ -822,7 +898,7 @@ func _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/CreateApiKey",
|
||||
FullMethod: HeadscaleService_CreateApiKey_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).CreateApiKey(ctx, req.(*CreateApiKeyRequest))
|
||||
@@ -840,7 +916,7 @@ func _HeadscaleService_ExpireApiKey_Handler(srv interface{}, ctx context.Context
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ExpireApiKey",
|
||||
FullMethod: HeadscaleService_ExpireApiKey_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ExpireApiKey(ctx, req.(*ExpireApiKeyRequest))
|
||||
@@ -858,7 +934,7 @@ func _HeadscaleService_ListApiKeys_Handler(srv interface{}, ctx context.Context,
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/headscale.v1.HeadscaleService/ListApiKeys",
|
||||
FullMethod: HeadscaleService_ListApiKeys_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(HeadscaleServiceServer).ListApiKeys(ctx, req.(*ListApiKeysRequest))
|
||||
@@ -866,6 +942,24 @@ func _HeadscaleService_ListApiKeys_Handler(srv interface{}, ctx context.Context,
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}

func _HeadscaleService_DeleteApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteApiKeyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).DeleteApiKey(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_DeleteApiKey_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).DeleteApiKey(ctx, req.(*DeleteApiKeyRequest))
}
return interceptor(ctx, in, info, handler)
}

// HeadscaleService_ServiceDesc is the grpc.ServiceDesc for HeadscaleService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -906,40 +1000,44 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
Handler: _HeadscaleService_ListPreAuthKeys_Handler,
},
{
MethodName: "DebugCreateMachine",
Handler: _HeadscaleService_DebugCreateMachine_Handler,
MethodName: "DebugCreateNode",
Handler: _HeadscaleService_DebugCreateNode_Handler,
},
{
MethodName: "GetMachine",
Handler: _HeadscaleService_GetMachine_Handler,
MethodName: "GetNode",
Handler: _HeadscaleService_GetNode_Handler,
},
{
MethodName: "SetTags",
Handler: _HeadscaleService_SetTags_Handler,
},
{
MethodName: "RegisterMachine",
Handler: _HeadscaleService_RegisterMachine_Handler,
MethodName: "RegisterNode",
Handler: _HeadscaleService_RegisterNode_Handler,
},
{
MethodName: "DeleteMachine",
Handler: _HeadscaleService_DeleteMachine_Handler,
MethodName: "DeleteNode",
Handler: _HeadscaleService_DeleteNode_Handler,
},
{
MethodName: "ExpireMachine",
Handler: _HeadscaleService_ExpireMachine_Handler,
MethodName: "ExpireNode",
Handler: _HeadscaleService_ExpireNode_Handler,
},
{
MethodName: "RenameMachine",
Handler: _HeadscaleService_RenameMachine_Handler,
MethodName: "RenameNode",
Handler: _HeadscaleService_RenameNode_Handler,
},
{
MethodName: "ListMachines",
Handler: _HeadscaleService_ListMachines_Handler,
MethodName: "ListNodes",
Handler: _HeadscaleService_ListNodes_Handler,
},
{
MethodName: "MoveMachine",
Handler: _HeadscaleService_MoveMachine_Handler,
MethodName: "MoveNode",
Handler: _HeadscaleService_MoveNode_Handler,
},
{
MethodName: "BackfillNodeIPs",
Handler: _HeadscaleService_BackfillNodeIPs_Handler,
},
{
MethodName: "GetRoutes",
@@ -954,8 +1052,8 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
Handler: _HeadscaleService_DisableRoute_Handler,
},
{
MethodName: "GetMachineRoutes",
Handler: _HeadscaleService_GetMachineRoutes_Handler,
MethodName: "GetNodeRoutes",
Handler: _HeadscaleService_GetNodeRoutes_Handler,
},
{
MethodName: "DeleteRoute",
@@ -973,6 +1071,10 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "ListApiKeys",
Handler: _HeadscaleService_ListApiKeys_Handler,
},
{
MethodName: "DeleteApiKey",
Handler: _HeadscaleService_DeleteApiKey_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "headscale/v1/headscale.proto"
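For context on the regenerated API above (the Machine→Node RPC rename plus the new BackfillNodeIPs and DeleteApiKey methods), here is a minimal sketch of how the generated client might be called. It is not part of this diff; the import path, server address, placeholder API key, and the bearer-token metadata scheme are assumptions for illustration only.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"

	// Assumed import path for the generated code shown in this diff.
	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

func main() {
	// Assumed gRPC endpoint of a headscale server; TLS is skipped for brevity.
	conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// Assumed auth scheme: an API key passed as a bearer token in gRPC metadata.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "authorization", "Bearer <api-key>")

	// ListNodes replaces the former ListMachines RPC in this release.
	resp, err := client.ListNodes(ctx, &v1.ListNodesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}
```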
Some files were not shown because too many files have changed in this diff.