Compare commits


2 Commits

Author                SHA1        Message                    Date
Florian Preinstorfer  2a8bb7cb1c  Set doc version to 0.25.1  2025-03-21 06:53:39 +01:00
Florian Preinstorfer  1bb11278f0  Set doc version to 0.25.0  2025-03-21 06:53:02 +01:00
181 changed files with 17010 additions and 22430 deletions

.coderabbit.yaml (new file, 15 lines)

@@ -0,0 +1,15 @@
# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
language: "en-GB"
early_access: false
reviews:
profile: "chill"
request_changes_workflow: false
high_level_summary: true
poem: true
review_status: true
collapse_walkthrough: false
auto_review:
enabled: true
drafts: true
chat:
auto_reply: true


@@ -44,16 +44,10 @@ body:
attributes:
label: Environment
description: |
Please provide information about your environment.
If you are using a container, always provide the headscale version and not only the Docker image version.
Please do not put "latest".
If you are experiencing a problem during an upgrade, please provide both the old and the new versions of Headscale and Tailscale.
examples:
- **OS**: Ubuntu 24.04
- **Headscale version**: 0.24.3
- **Tailscale version**: 1.80.0
- **OS**: Ubuntu 20.04
- **Headscale version**: 0.22.3
- **Tailscale version**: 1.64.0
value: |
- OS:
- Headscale version:
@@ -71,27 +65,19 @@ body:
required: false
- type: textarea
attributes:
label: Debug information
label: Anything else?
description: |
Links? References? Anything that will give us more context about the issue you are encountering.
If **any** of these are omitted we will likely close your issue, do **not** ignore them.
Links? References? Anything that will give us more context about the issue you are encountering!
- Client netmap dump (see below)
- Policy configuration
- ACL configuration
- Headscale configuration
- Headscale log (with `trace` enabled)
Dump the netmap of tailscale clients:
`tailscale debug netmap > DESCRIPTIVE_NAME.json`
Dump the status of tailscale clients:
`tailscale status --json > DESCRIPTIVE_NAME.json`
Get the logs of a Tailscale client that is not working as expected.
`tailscale daemon-logs`
Please provide information describing the netmap, which client, which headscale version etc.
Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in.
**Ensure** you use formatting for files you attach.
Do **not** paste in long files.
validations:
required: true
required: false


@@ -17,12 +17,12 @@ jobs:
runs-on: ubuntu-latest
permissions: write-all
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
uses: dorny/paths-filter@v3
with:
filters: |
files:
@@ -31,13 +31,10 @@ jobs:
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run nix build
id: build
@@ -55,7 +52,7 @@ jobs:
exit $BUILD_STATUS
- name: Nix gosum diverging
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
uses: actions/github-script@v6
if: failure() && steps.build.outcome == 'failure'
with:
github-token: ${{secrets.GITHUB_TOKEN}}
@@ -67,7 +64,7 @@ jobs:
body: 'Nix build failed with wrong gosum, please update "vendorSha256" (${{ steps.build.outputs.OLD_HASH }}) for the "headscale" package in flake.nix with the new SHA: ${{ steps.build.outputs.NEW_HASH }}'
})
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@v4
if: steps.changed-files.outputs.files == 'true'
with:
name: headscale-linux
@@ -86,16 +83,13 @@ jobs:
- "GOARCH=arm64 GOOS=darwin"
- "GOARCH=amd64 GOOS=darwin"
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Run go cross compile
run: env ${{ matrix.env }} nix develop --command -- go build -o "headscale" ./cmd/headscale
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- uses: actions/upload-artifact@v4
with:
name: "headscale-${{ matrix.env }}"
path: "headscale"


@@ -10,12 +10,12 @@ jobs:
check-tests:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
uses: dorny/paths-filter@v3
with:
filters: |
files:
@@ -24,13 +24,10 @@ jobs:
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Generate and check integration tests
if: steps.changed-files.outputs.files == 'true'


@@ -21,15 +21,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
uses: actions/setup-python@v5
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@v4
with:
key: ${{ github.ref }}
path: .cache


@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Install python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
uses: actions/setup-python@v5
with:
python-version: 3.x
- name: Setup cache
uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3
uses: actions/cache@v4
with:
key: ${{ github.ref }}
path: .cache


@@ -38,13 +38,12 @@ func findTests() []string {
return tests
}
func updateYAML(tests []string, testPath string) {
func updateYAML(tests []string) {
testsForYq := fmt.Sprintf("[%s]", strings.Join(tests, ", "))
yqCommand := fmt.Sprintf(
"yq eval '.jobs.integration-test.strategy.matrix.test = %s' %s -i",
"yq eval '.jobs.integration-test.strategy.matrix.test = %s' ./test-integration.yaml -i",
testsForYq,
testPath,
)
cmd := exec.Command("bash", "-c", yqCommand)
@@ -59,7 +58,7 @@ func updateYAML(tests []string, testPath string) {
log.Fatalf("failed to run yq command: %s", err)
}
fmt.Printf("YAML file (%s) updated successfully\n", testPath)
fmt.Println("YAML file updated successfully")
}
func main() {
@@ -70,5 +69,5 @@ func main() {
quotedTests[i] = fmt.Sprintf("\"%s\"", test)
}
updateYAML(quotedTests, "./test-integration.yaml")
updateYAML(quotedTests)
}


@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}
- name: Run GitHub Actions Version Updater
uses: saadmk11/github-actions-version-updater@64be81ba69383f81f2be476703ea6570c4c8686e # v0.8.1
uses: saadmk11/github-actions-version-updater@v0.8.1
with:
# [Required] Access token with `workflow` scope.
token: ${{ secrets.WORKFLOW_SECRET }}


@@ -10,12 +10,12 @@ jobs:
golangci-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
uses: dorny/paths-filter@v3
with:
filters: |
files:
@@ -24,27 +24,24 @@ jobs:
- '**/*.go'
- 'integration_test/'
- 'config-example.yaml'
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: golangci-lint
if: steps.changed-files.outputs.files == 'true'
run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --format=colored-line-number
run: nix develop --command -- golangci-lint run --new-from-rev=${{github.event.pull_request.base.sha}} --out-format=colored-line-number
prettier-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
uses: dorny/paths-filter@v3
with:
filters: |
files:
@@ -58,13 +55,10 @@ jobs:
- '**/*.css'
- '**/*.scss'
- '**/*.html'
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Prettify code
if: steps.changed-files.outputs.files == 'true'
@@ -73,12 +67,9 @@ jobs:
proto-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- uses: actions/checkout@v4
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Buf lint
run: nix develop --command -- buf lint proto


@@ -13,28 +13,25 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Login to DockerHub
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- uses: DeterminateSystems/nix-installer-action@main
- uses: DeterminateSystems/magic-nix-cache-action@main
- name: Run goreleaser
run: nix develop --command -- goreleaser release --clean


@@ -12,7 +12,7 @@ jobs:
issues: write
pull-requests: write
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
- uses: actions/stale@v9
with:
days-before-issue-stale: 90
days-before-issue-close: 7


@@ -22,11 +22,8 @@ jobs:
- TestACLNamedHostsCanReach
- TestACLDevice1CanAccessDevice2
- TestPolicyUpdateWhileRunningWithCLIInDatabase
- TestACLAutogroupMember
- TestACLAutogroupTagged
- TestAuthKeyLogoutAndReloginSameUser
- TestAuthKeyLogoutAndReloginNewUser
- TestAuthKeyLogoutAndReloginSameUserExpiredKey
- TestOIDCAuthenticationPingAll
- TestOIDCExpireNodesBasedOnTokenExpiry
- TestOIDC024UserCreation
@@ -51,6 +48,7 @@ jobs:
- TestDERPVerifyEndpoint
- TestResolveMagicDNS
- TestResolveMagicDNSExtraRecordsPath
- TestValidateResolvConf
- TestDERPServerScenario
- TestDERPServerWebsocketScenario
- TestPingAllByIP
@@ -67,13 +65,11 @@ jobs:
- Test2118DeletingOnlineNodePanics
- TestEnablingRoutes
- TestHASubnetRouterFailover
- TestEnableDisableAutoApprovedRoute
- TestAutoApprovedSubRoute2068
- TestSubnetRouteACL
- TestEnablingExitRoutes
- TestSubnetRouterMultiNetwork
- TestSubnetRouterMultiNetworkExitNode
- TestAutoApproveMultiNetwork
- TestSubnetRouteACLFiltering
- TestHeadscale
- TestCreateTailscale
- TestTailscaleNodesJoiningHeadcale
- TestSSHOneUserToAll
- TestSSHMultipleUsersAllToAll
@@ -92,12 +88,12 @@ jobs:
# that triggered the build.
HAS_TAILSCALE_SECRET: ${{ secrets.TS_OAUTH_CLIENT_ID }}
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
uses: dorny/paths-filter@v3
with:
filters: |
files:
@@ -108,7 +104,7 @@ jobs:
- 'config-example.yaml'
- name: Tailscale
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: tailscale/github-action@6986d2c82a91fbac2949fe01f5bab95cf21b5102 # v3.2.2
uses: tailscale/github-action@v2
with:
oauth-client-id: ${{ secrets.TS_OAUTH_CLIENT_ID }}
oauth-secret: ${{ secrets.TS_OAUTH_SECRET }}
@@ -116,42 +112,45 @@ jobs:
- name: Setup SSH server for Actor
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: alexellis/setup-sshd-actor@master
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- uses: satackey/action-docker-layer-caching@main
if: steps.changed-files.outputs.files == 'true'
continue-on-error: true
- name: Run Integration Test
uses: Wandalen/wretry.action@e68c23e6309f2871ca8ae4763e7629b9c258e1ea # v3.8.0
uses: Wandalen/wretry.action@master
if: steps.changed-files.outputs.files == 'true'
env:
USE_POSTGRES: ${{ matrix.database == 'postgres' && '1' || '0' }}
with:
# Our integration tests are started like a thundering herd, often
# hitting limits of the various external repositories we depend on
# like docker hub. This will retry jobs every 5 min, 10 times,
# hopefully letting us avoid manual intervention and restarting jobs.
# One could of course argue that we should invest in trying to avoid
# this, but currently it seems like a larger investment to be cleverer
# about this.
# Some of the jobs might still require manual restart as they are really
# slow and this will cause them to eventually be killed by Github actions.
attempt_delay: 300000 # 5 min
attempt_limit: 10
attempt_limit: 5
command: |
nix develop --command -- hi run "^${{ matrix.test }}$" \
--timeout=120m \
--postgres=${{ matrix.database == 'postgres' && 'true' || 'false' }}
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
nix develop --command -- docker run \
--tty --rm \
--volume ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
--volume $PWD:$PWD -w $PWD/integration \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume $PWD/control_logs:/tmp/control \
--env HEADSCALE_INTEGRATION_POSTGRES=${{env.USE_POSTGRES}} \
golang:1 \
go run gotest.tools/gotestsum@latest -- ./... \
-failfast \
-timeout 120m \
-parallel 1 \
-run "^${{ matrix.test }}$"
- uses: actions/upload-artifact@v4
if: always() && steps.changed-files.outputs.files == 'true'
with:
name: ${{ matrix.test }}-${{matrix.database}}-logs
path: "control_logs/*/*.log"
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
path: "control_logs/*.log"
- uses: actions/upload-artifact@v4
if: always() && steps.changed-files.outputs.files == 'true'
with:
name: ${{ matrix.test }}-${{matrix.database}}-archives
path: "control_logs/*/*.tar"
name: ${{ matrix.test }}-${{matrix.database}}-pprof
path: "control_logs/*.pprof.tar"
- name: Setup a blocking tmux session
if: ${{ env.HAS_TAILSCALE_SECRET }}
uses: alexellis/block-with-tmux-action@master


@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Get changed files
id: changed-files
uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2
uses: dorny/paths-filter@v3
with:
filters: |
files:
@@ -27,13 +27,10 @@ jobs:
- 'integration_test/'
- 'config-example.yaml'
- uses: nixbuild/nix-quick-install-action@889f3180bb5f064ee9e3201428d04ae9e41d54ad # v31
- uses: DeterminateSystems/nix-installer-action@main
if: steps.changed-files.outputs.files == 'true'
- uses: nix-community/cache-nix-action@135667ec418502fa5a3598af6fb9eb733888ce6a # v6.1.3
- uses: DeterminateSystems/magic-nix-cache-action@main
if: steps.changed-files.outputs.files == 'true'
with:
primary-key: nix-${{ runner.os }}-${{ runner.arch }}-${{ hashFiles('**/*.nix', '**/flake.lock') }}
restore-prefixes-first-match: nix-${{ runner.os }}-${{ runner.arch }}
- name: Run tests
if: steps.changed-files.outputs.files == 'true'


@@ -10,10 +10,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Install Nix
uses: DeterminateSystems/nix-installer-action@21a544727d0c62386e78b4befe52d19ad12692e3 # v17
uses: DeterminateSystems/nix-installer-action@main
- name: Update flake.lock
uses: DeterminateSystems/update-flake-lock@428c2b58a4b7414dabd372acb6a03dba1084d3ab # v25
uses: DeterminateSystems/update-flake-lock@main
with:
pr-title: "Update flake.lock"

.gitignore

@@ -20,9 +20,9 @@ vendor/
dist/
/headscale
config.json
config.yaml
config*.yaml
!config-example.yaml
derp.yaml
*.hujson
*.key


@@ -1,79 +1,70 @@
---
version: "2"
run:
timeout: 10m
build-tags:
- ts2019
issues:
skip-dirs:
- gen
linters:
default: all
enable-all: true
disable:
- cyclop
- depguard
- dupl
- exhaustruct
- funlen
- revive
- lll
- gofmt
- gochecknoglobals
- gochecknoinits
- gocognit
- godox
- interfacebloat
- ireturn
- lll
- maintidx
- makezero
- musttag
- nestif
- nolintlint
- paralleltest
- revive
- funlen
- tagliatelle
- testpackage
- wrapcheck
- wsl
settings:
gocritic:
disabled-checks:
- appendAssign
- ifElseChain
nlreturn:
block-size: 4
varnamelen:
ignore-names:
- err
- db
- id
- ip
- ok
- c
- tt
- tx
- rx
- sb
- wg
- pr
- p
- p2
ignore-type-assert-ok: true
ignore-map-index-ok: true
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- third_party$
- builtin$
- examples$
- gen
- godox
- ireturn
- execinquery
- exhaustruct
- nolintlint
- musttag # causes issues with imported libs
- depguard
- exportloopref
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$
- gen
# We should strive to enable these:
- wrapcheck
- dupl
- makezero
- maintidx
# Limits the methods of an interface to 10. We have more in integration tests
- interfacebloat
# We might want to enable this, but it might be a lot of work
- cyclop
- nestif
- wsl # might be incompatible with gofumpt
- testpackage
- paralleltest
linters-settings:
varnamelen:
ignore-type-assert-ok: true
ignore-map-index-ok: true
ignore-names:
- err
- db
- id
- ip
- ok
- c
- tt
- tx
- rx
gocritic:
disabled-checks:
- appendAssign
# TODO(kradalby): Remove this
- ifElseChain
nlreturn:
block-size: 4


@@ -2,12 +2,11 @@
version: 2
before:
hooks:
- go mod tidy -compat=1.24
- go mod tidy -compat=1.22
- go mod vendor
release:
prerelease: auto
draft: true
builds:
- id: headscale
@@ -28,17 +27,14 @@ builds:
flags:
- -mod=readonly
ldflags:
- -s -w
- -X github.com/juanfont/headscale/hscontrol/types.Version={{ .Version }}
- -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash={{ .Commit }}
- -s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=v{{.Version}}
tags:
- ts2019
archives:
- id: golang-cross
name_template: '{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}'
formats:
- binary
format: binary
source:
enabled: true
@@ -57,22 +53,15 @@ nfpms:
# List file contents: dpkg -c dist/headscale...deb
# Package metadata: dpkg --info dist/headscale....deb
#
- ids:
- builds:
- headscale
package_name: headscale
priority: optional
vendor: headscale
maintainer: Kristoffer Dalby <kristoffer@dalby.cc>
homepage: https://github.com/juanfont/headscale
description: |-
Open source implementation of the Tailscale control server.
Headscale aims to implement a self-hosted, open source alternative to the
Tailscale control server. Headscale's goal is to provide self-hosters and
hobbyists with an open-source server they can use for their projects and
labs. It implements a narrow scope, a single Tailscale network (tailnet),
suitable for personal use or a small open-source organisation.
license: BSD
bindir: /usr/bin
section: net
formats:
- deb
contents:
@@ -81,21 +70,15 @@ nfpms:
type: config|noreplace
file_info:
mode: 0644
- src: ./packaging/systemd/headscale.service
- src: ./docs/packaging/headscale.systemd.service
dst: /usr/lib/systemd/system/headscale.service
- dst: /var/lib/headscale
type: dir
- src: LICENSE
dst: /usr/share/doc/headscale/copyright
- dst: /var/run/headscale
type: dir
scripts:
postinstall: ./packaging/deb/postinst
postremove: ./packaging/deb/postrm
preremove: ./packaging/deb/prerm
deb:
lintian_overrides:
- no-changelog # Our CHANGELOG.md uses a different formatting
- no-manual-page
- statically-linked-binary
postinstall: ./docs/packaging/postinstall.sh
postremove: ./docs/packaging/postremove.sh
kos:
- id: ghcr


@@ -2,189 +2,8 @@
## Next
### BREAKING
- Policy: Zero or empty destination port is no longer allowed
[#2606](https://github.com/juanfont/headscale/pull/2606)
### Changes
- Remove policy v1 code [#2600](https://github.com/juanfont/headscale/pull/2600)
- Refactor Debian/Ubuntu packaging and drop support for Ubuntu 20.04.
[#2614](https://github.com/juanfont/headscale/pull/2614)
- Support client verify for DERP
[#2046](https://github.com/juanfont/headscale/pull/2046)
## 0.26.1 (2025-06-06)
### Changes
- Ensure nodes are matching both node key and machine key when connecting.
[#2642](https://github.com/juanfont/headscale/pull/2642)
## 0.26.0 (2025-05-14)
### BREAKING
#### Routes
Route internals have been rewritten, removing the dedicated route table in the
database. This was done to simplify the codebase, which had grown unnecessarily
complex after the routes were split into separate tables. The overhead of having
to go via the database and keeping the state in sync made the code very hard to
reason about and prone to errors. The majority of the route state is only
relevant when headscale is running, and is now only kept in memory. As part of
this, the CLI and API have been simplified to reflect the changes:
```console
$ headscale nodes list-routes
ID | Hostname | Approved | Available | Serving (Primary)
1 | ts-head-ruqsg8 | | 0.0.0.0/0, ::/0 |
2 | ts-unstable-fq7ob4 | | 0.0.0.0/0, ::/0 |
$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0,::/0
Node updated
$ headscale nodes list-routes
ID | Hostname | Approved | Available | Serving (Primary)
1 | ts-head-ruqsg8 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
2 | ts-unstable-fq7ob4 | | 0.0.0.0/0, ::/0 |
```
Note that if an exit route is approved (0.0.0.0/0 or ::/0), both IPv4 and IPv6
will be approved.
- Route API and CLI has been removed
[#2422](https://github.com/juanfont/headscale/pull/2422)
- Routes are now managed via the Node API
[#2422](https://github.com/juanfont/headscale/pull/2422)
- Only routes accessible to the node will be sent to the node
[#2561](https://github.com/juanfont/headscale/pull/2561)
#### Policy v2
This release introduces a new policy implementation. The new policy is a
complete rewrite, and it introduces some significant quality and consistency
improvements. In principle, there are not really any new features, but some long
standing bugs should have been resolved, or be easier to fix in the future. The
new policy code passes all of our tests.
**Changes**
- The policy is validated and "resolved" when loading, providing errors for
invalid rules and conditions.
- Previously this was done as a mix between load and runtime (when it was
applied to a node).
- This means that when you convert for the first time, a policy that previously
loaded but failed at runtime will now fail at load time.
- Error messages should be more descriptive and informative.
- There is still work to be done here, but it is already improved with "typing"
(e.g. only Users can be put in Groups)
- All users must contain an `@` character.
- If your user naturally contains an `@`, like an email address, this will just work.
- If it is based on a username or another identifier that does not contain an `@`, an
`@` should be appended at the end. For example, if your user is `john`, it
must be written as `john@` in the policy (see the sketch below).
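As an illustration only, a minimal policy fragment following this rule might look like the sketch below; the group name and the ACL entry are hypothetical and not taken from the headscale documentation:

```json
{
  "groups": {
    "group:admins": ["john@", "alice@example.com"]
  },
  "acls": [
    {
      "action": "accept",
      "src": ["group:admins"],
      "dst": ["john@:*"]
    }
  ]
}
```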
<details>
<summary>Migration notes when the policy is stored in the database.</summary>
This section **only** applies if the policy is stored in the database and
Headscale 0.26 doesn't start due to a policy error
(`failed to load ACL policy`).
- Start Headscale 0.26 with the environment variable `HEADSCALE_POLICY_V1=1`
set. You can check that Headscale picked up the environment variable by
observing this message during startup: `Using policy manager version: 1`
- Dump the policy to a file: `headscale policy get > policy.json`
- Edit `policy.json` and migrate to policy V2. Use the command
`headscale policy check --file policy.json` to check for policy errors.
- Load the modified policy: `headscale policy set --file policy.json`
- Restart Headscale **without** the environment variable `HEADSCALE_POLICY_V1`.
Headscale should now print the message `Using policy manager version: 2` and
startup successfully.
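Taken together, the procedure looks roughly like the console sketch below. This is only a sketch: in most deployments the server runs under a service manager such as systemd, so the environment variable has to be set (and later removed) there rather than on an interactive command line.

```console
# 1. Start Headscale 0.26 with the v1 policy manager
$ HEADSCALE_POLICY_V1=1 headscale serve      # startup log: Using policy manager version: 1

# 2. Dump, edit and validate the policy (from another shell)
$ headscale policy get > policy.json
$ headscale policy check --file policy.json

# 3. Load the migrated policy, then restart without the variable
$ headscale policy set --file policy.json
$ headscale serve                            # startup log: Using policy manager version: 2
```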
</details>
**SSH**
The SSH policy has been reworked to be more consistent with the rest of the
policy. In addition, several inconsistencies between our implementation and
Tailscale's upstream have been closed, and this might be a breaking change for
some users. Please refer to the
[upstream documentation](https://tailscale.com/kb/1337/acl-syntax#tailscale-ssh)
for more information on which types are allowed in `src`, `dst` and `users`.
There is one large inconsistency left: we allow `*` as a destination because we
currently do not support `autogroup:self`, `autogroup:member` and
`autogroup:tagged`. The support for `*` will be removed when we have support for
the autogroups.
**Current state**
The new policy is passing all tests, both integration and unit tests. This does
not mean it is perfect, but it is a good start. Corner cases that currently
work in v1 but are not covered by tests might be broken in v2 (and vice versa).
**We do need help testing this code**
#### Other breaking changes
- Disallow `server_url` and `base_domain` to be equal
[#2544](https://github.com/juanfont/headscale/pull/2544)
- Return full user in API for pre auth keys instead of string
[#2542](https://github.com/juanfont/headscale/pull/2542)
- Pre auth key API/CLI now uses ID over username
[#2542](https://github.com/juanfont/headscale/pull/2542)
- A non-empty list of global nameservers needs to be specified via
`dns.nameservers.global` if the configuration option `dns.override_local_dns`
is enabled or is not specified in the configuration file. This aligns with
behaviour of tailscale.com.
[#2438](https://github.com/juanfont/headscale/pull/2438)
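For reference, a minimal sketch of the corresponding configuration; the nameserver addresses are placeholders, not recommendations:

```yaml
dns:
  override_local_dns: true
  nameservers:
    global:
      - 1.1.1.1
      - 2606:4700:4700::1111
```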
### Changes
- Use Go 1.24 [#2427](https://github.com/juanfont/headscale/pull/2427)
- Add `headscale policy check` command to check policy
[#2553](https://github.com/juanfont/headscale/pull/2553)
- `oidc.map_legacy_users` and `oidc.strip_email_domain` have been removed
[#2411](https://github.com/juanfont/headscale/pull/2411)
- Add more information to `/debug` endpoint
[#2420](https://github.com/juanfont/headscale/pull/2420)
- It is now possible to inspect running goroutines and take profiles
- View of config, policy, filter, ssh policy per node, connected nodes and
DERPmap
- OIDC: Fetch UserInfo to get EmailVerified if necessary
[#2493](https://github.com/juanfont/headscale/pull/2493)
- If an OIDC provider doesn't include the `email_verified` claim in its ID
tokens, Headscale will attempt to get it from the UserInfo endpoint.
- OIDC: Try to populate name, email and username from UserInfo
[#2545](https://github.com/juanfont/headscale/pull/2545)
- Improve performance by only querying relevant nodes from the database for node
updates [#2509](https://github.com/juanfont/headscale/pull/2509)
- Node FQDNs in the netmap will now contain a dot (".") at the end. This aligns
with the behaviour of tailscale.com
[#2503](https://github.com/juanfont/headscale/pull/2503)
- Restore support for "Override local DNS"
[#2438](https://github.com/juanfont/headscale/pull/2438)
- Add documentation for routes
[#2496](https://github.com/juanfont/headscale/pull/2496)
- Add support for `autogroup:member`, `autogroup:tagged`
[#2572](https://github.com/juanfont/headscale/pull/2572)
## 0.25.1 (2025-02-25)
### Changes
- Fix issue where registration errors were not sent correctly
[#2435](https://github.com/juanfont/headscale/pull/2435)
- Fix issue where routes passed on registration were not saved
[#2444](https://github.com/juanfont/headscale/pull/2444)
- Fix issue where registration page was displayed twice
[#2445](https://github.com/juanfont/headscale/pull/2445)
## 0.25.0 (2025-02-11)
## 0.25.0 (2025-02-xx)
### BREAKING
@@ -198,7 +17,7 @@ working in v1 and not tested might be broken in v2 (and vice versa).
- A logged out node logging in with the same user will replace the existing
node.
- Remove support for Tailscale clients older than 1.62 (Capability version 87)
[#2405](https://github.com/juanfont/headscale/pull/2405)
[#2405](https://github.com/juanfont/headscale/pull/2405)
### Changes
@@ -218,13 +37,10 @@ working in v1 and not tested might be broken in v2 (and vice versa).
[#2396](https://github.com/juanfont/headscale/pull/2396)
- Rehaul HTTP errors, return better status code and errors to users
[#2398](https://github.com/juanfont/headscale/pull/2398)
- Print headscale version and commit on server startup
[#2415](https://github.com/juanfont/headscale/pull/2415)
## 0.24.3 (2025-02-07)
### Changes
- Fix migration error caused by nodes having invalid auth keys
[#2412](https://github.com/juanfont/headscale/pull/2412)
- Pre auth keys belonging to a user are no longer deleted with the user


@@ -2,7 +2,7 @@
# and are in no way endorsed by Headscale's maintainers as an
# official nor supported release or distribution.
FROM docker.io/golang:1.24-bookworm
FROM docker.io/golang:1.23-bookworm
ARG VERSION=dev
ENV GOPATH /go
WORKDIR /go/src/headscale
@@ -18,7 +18,7 @@ RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go install -a ./cmd/headscale && test -e /go/bin/headscale
RUN CGO_ENABLED=0 GOOS=linux go install -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$VERSION" -a ./cmd/headscale && test -e /go/bin/headscale
# Need to reset the entrypoint or everything will run as a busybox script
ENTRYPOINT []


@@ -4,7 +4,7 @@
# This Dockerfile is more or less lifted from tailscale/tailscale
# to ensure a similar build process when testing the HEAD of tailscale.
FROM golang:1.24-alpine AS build-env
FROM golang:1.23-alpine AS build-env
WORKDIR /go/src

Makefile

@@ -1,130 +1,64 @@
# Headscale Makefile
# Modern Makefile following best practices
# Calculate version
version ?= $(shell git describe --always --tags --dirty)
# Version calculation
VERSION ?= $(shell git describe --always --tags --dirty)
rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
# Build configuration
# Determine if OS supports pie
GOOS ?= $(shell uname | tr '[:upper:]' '[:lower:]')
ifeq ($(filter $(GOOS), openbsd netbsd solaris plan9), )
PIE_FLAGS = -buildmode=pie
ifeq ($(filter $(GOOS), openbsd netbsd soloaris plan9), )
pieflags = -buildmode=pie
else
endif
# Tool availability check with nix warning
define check_tool
@command -v $(1) >/dev/null 2>&1 || { \
echo "Warning: $(1) not found. Run 'nix develop' to ensure all dependencies are available."; \
exit 1; \
}
endef
# Source file collections using shell find for better performance
GO_SOURCES := $(shell find . -name '*.go' -not -path './gen/*' -not -path './vendor/*')
PROTO_SOURCES := $(shell find . -name '*.proto' -not -path './gen/*' -not -path './vendor/*')
DOC_SOURCES := $(shell find . \( -name '*.md' -o -name '*.yaml' -o -name '*.yml' -o -name '*.ts' -o -name '*.js' -o -name '*.html' -o -name '*.css' -o -name '*.scss' -o -name '*.sass' \) -not -path './gen/*' -not -path './vendor/*' -not -path './node_modules/*')
# Default target
.PHONY: all
all: lint test build
# Dependency checking
.PHONY: check-deps
check-deps:
$(call check_tool,go)
$(call check_tool,golangci-lint)
$(call check_tool,gofumpt)
$(call check_tool,prettier)
$(call check_tool,clang-format)
$(call check_tool,buf)
# Build targets
.PHONY: build
build: check-deps $(GO_SOURCES) go.mod go.sum
@echo "Building headscale..."
go build $(PIE_FLAGS) -ldflags "-X main.version=$(VERSION)" -o headscale ./cmd/headscale
# Test targets
.PHONY: test
test: check-deps $(GO_SOURCES) go.mod go.sum
@echo "Running Go tests..."
go test -race ./...
# GO_SOURCES = $(wildcard *.go)
# PROTO_SOURCES = $(wildcard **/*.proto)
GO_SOURCES = $(call rwildcard,,*.go)
PROTO_SOURCES = $(call rwildcard,,*.proto)
# Formatting targets
.PHONY: fmt
build:
nix build
dev: lint test build
test:
gotestsum -- -short -race -coverprofile=coverage.out ./...
test_integration:
docker run \
-t --rm \
-v ~/.cache/hs-integration-go:/go \
--name headscale-test-suite \
-v $$PWD:$$PWD -w $$PWD/integration \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $$PWD/control_logs:/tmp/control \
golang:1 \
go run gotest.tools/gotestsum@latest -- -race -failfast ./... -timeout 120m -parallel 8
lint:
golangci-lint run --fix --timeout 10m
fmt: fmt-go fmt-prettier fmt-proto
.PHONY: fmt-go
fmt-go: check-deps $(GO_SOURCES)
@echo "Formatting Go code..."
fmt-prettier:
prettier --write '**/**.{ts,js,md,yaml,yml,sass,css,scss,html}'
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
fmt-go:
# TODO(kradalby): Reeval if we want to use 88 in the future.
# golines --max-len=88 --base-formatter=gofumpt -w $(GO_SOURCES)
gofumpt -l -w .
golangci-lint run --fix
.PHONY: fmt-prettier
fmt-prettier: check-deps $(DOC_SOURCES)
@echo "Formatting documentation and config files..."
prettier --write '**/*.{ts,js,md,yaml,yml,sass,css,scss,html}'
prettier --write --print-width 80 --prose-wrap always CHANGELOG.md
.PHONY: fmt-proto
fmt-proto: check-deps $(PROTO_SOURCES)
@echo "Formatting Protocol Buffer files..."
fmt-proto:
clang-format -i $(PROTO_SOURCES)
# Linting targets
.PHONY: lint
lint: lint-go lint-proto
proto-lint:
cd proto/ && go run github.com/bufbuild/buf/cmd/buf lint
.PHONY: lint-go
lint-go: check-deps $(GO_SOURCES) go.mod go.sum
@echo "Linting Go code..."
golangci-lint run --timeout 10m
compress: build
upx --brute headscale
.PHONY: lint-proto
lint-proto: check-deps $(PROTO_SOURCES)
@echo "Linting Protocol Buffer files..."
cd proto/ && buf lint
# Code generation
.PHONY: generate
generate: check-deps $(PROTO_SOURCES)
@echo "Generating code from Protocol Buffers..."
generate:
rm -rf gen
buf generate proto
# Clean targets
.PHONY: clean
clean:
rm -rf headscale gen
# Development workflow
.PHONY: dev
dev: fmt lint test build
# Help target
.PHONY: help
help:
@echo "Headscale Development Makefile"
@echo ""
@echo "Main targets:"
@echo " all - Run lint, test, and build (default)"
@echo " build - Build headscale binary"
@echo " test - Run Go tests"
@echo " fmt - Format all code (Go, docs, proto)"
@echo " lint - Lint all code (Go, proto)"
@echo " generate - Generate code from Protocol Buffers"
@echo " dev - Full development workflow (fmt + lint + test + build)"
@echo " clean - Clean build artifacts"
@echo ""
@echo "Specific targets:"
@echo " fmt-go - Format Go code only"
@echo " fmt-prettier - Format documentation only"
@echo " fmt-proto - Format Protocol Buffer files only"
@echo " lint-go - Lint Go code only"
@echo " lint-proto - Lint Protocol Buffer files only"
@echo ""
@echo "Dependencies:"
@echo " check-deps - Verify required tools are available"
@echo ""
@echo "Note: If not running in a nix shell, ensure dependencies are available:"
@echo " nix develop"


@@ -7,12 +7,8 @@ An open source, self-hosted implementation of the Tailscale control server.
Join our [Discord server](https://discord.gg/c84AZQhmpx) for a chat.
**Note:** Always select the same GitHub tag as the released version you use
to ensure you have the correct example configuration. The `main` branch might
contain unreleased changes. The documentation is available for stable and
development versions:
- [Documentation for the stable version](https://headscale.net/stable/)
- [Documentation for the development version](https://headscale.net/development/)
to ensure you have the correct example configuration and documentation.
The `main` branch might contain unreleased changes.
## What is Tailscale
@@ -139,28 +135,15 @@ make test
To build the program:
```shell
make build
nix build
```
### Development workflow
or
We recommend using Nix for dependency management to ensure you have all required tools. If you prefer to manage dependencies yourself, you can use Make directly:
**With Nix (recommended):**
```shell
nix develop
make test
make build
```
**With your own dependencies:**
```shell
make test
make build
```
The Makefile will warn you if any required tools are missing and suggest running `nix develop`. Run `make help` to see all available targets.
## Contributors
<a href="https://github.com/juanfont/headscale/graphs/contributors">


@@ -13,7 +13,6 @@ import (
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/pterm/pterm"
"github.com/samber/lo"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"tailscale.com/types/key"
@@ -28,10 +27,8 @@ func init() {
listNodesNamespaceFlag := listNodesCmd.Flags().Lookup("namespace")
listNodesNamespaceFlag.Deprecated = deprecateNamespaceMessage
listNodesNamespaceFlag.Hidden = true
nodeCmd.AddCommand(listNodesCmd)
listNodeRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
nodeCmd.AddCommand(listNodeRoutesCmd)
nodeCmd.AddCommand(listNodesCmd)
registerNodeCmd.Flags().StringP("user", "u", "", "User")
@@ -79,7 +76,7 @@ func init() {
log.Fatal(err.Error())
}
moveNodeCmd.Flags().Uint64P("user", "u", 0, "New user")
moveNodeCmd.Flags().StringP("user", "u", "", "New user")
moveNodeCmd.Flags().StringP("namespace", "n", "", "User")
moveNodeNamespaceFlag := moveNodeCmd.Flags().Lookup("namespace")
@@ -93,14 +90,14 @@ func init() {
nodeCmd.AddCommand(moveNodeCmd)
tagCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
tagCmd.MarkFlagRequired("identifier")
tagCmd.Flags().StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
nodeCmd.AddCommand(tagCmd)
approveRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
approveRoutesCmd.MarkFlagRequired("identifier")
approveRoutesCmd.Flags().StringSliceP("routes", "r", []string{}, `List of routes that will be approved (comma-separated, e.g. "10.0.0.0/8,192.168.0.0/24" or empty string to remove all approved routes)`)
nodeCmd.AddCommand(approveRoutesCmd)
err = tagCmd.MarkFlagRequired("identifier")
if err != nil {
log.Fatal(err.Error())
}
tagCmd.Flags().
StringSliceP("tags", "t", []string{}, "List of tags to add to the node")
nodeCmd.AddCommand(tagCmd)
nodeCmd.AddCommand(backfillNodeIPsCmd)
}
@@ -209,72 +206,6 @@ var listNodesCmd = &cobra.Command{
},
}
var listNodeRoutesCmd = &cobra.Command{
Use: "list-routes",
Short: "List routes available on nodes",
Aliases: []string{"lsr", "routes"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
return
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
request := &v1.ListNodesRequest{}
response, err := client.ListNodes(ctx, request)
if err != nil {
ErrorOutput(
err,
"Cannot get nodes: "+status.Convert(err).Message(),
output,
)
}
if output != "" {
SuccessOutput(response.GetNodes(), "", output)
}
nodes := response.GetNodes()
if identifier != 0 {
for _, node := range response.GetNodes() {
if node.GetId() == identifier {
nodes = []*v1.Node{node}
break
}
}
}
nodes = lo.Filter(nodes, func(n *v1.Node, _ int) bool {
return (n.GetSubnetRoutes() != nil && len(n.GetSubnetRoutes()) > 0) || (n.GetApprovedRoutes() != nil && len(n.GetApprovedRoutes()) > 0) || (n.GetAvailableRoutes() != nil && len(n.GetAvailableRoutes()) > 0)
})
tableData, err := nodeRoutesToPtables(nodes)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var expireNodeCmd = &cobra.Command{
Use: "expire",
Short: "Expire (log out) a node in your network",
@@ -475,7 +406,7 @@ var moveNodeCmd = &cobra.Command{
return
}
user, err := cmd.Flags().GetUint64("user")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(
err,
@@ -726,35 +657,6 @@ func nodesToPtables(
return tableData, nil
}
func nodeRoutesToPtables(
nodes []*v1.Node,
) (pterm.TableData, error) {
tableHeader := []string{
"ID",
"Hostname",
"Approved",
"Available",
"Serving (Primary)",
}
tableData := pterm.TableData{tableHeader}
for _, node := range nodes {
nodeData := []string{
strconv.FormatUint(node.GetId(), util.Base10),
node.GetGivenName(),
strings.Join(node.GetApprovedRoutes(), ", "),
strings.Join(node.GetAvailableRoutes(), ", "),
strings.Join(node.GetSubnetRoutes(), ", "),
}
tableData = append(
tableData,
nodeData,
)
}
return tableData, nil
}
var tagCmd = &cobra.Command{
Use: "tag",
Short: "Manage the tags of a node",
@@ -812,60 +714,3 @@ var tagCmd = &cobra.Command{
}
},
}
var approveRoutesCmd = &cobra.Command{
Use: "approve-routes",
Short: "Manage the approved routes of a node",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
// retrieve flags from CLI
identifier, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error converting ID to integer: %s", err),
output,
)
return
}
routes, err := cmd.Flags().GetStringSlice("routes")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error retrieving list of routes to add to node, %v", err),
output,
)
return
}
// Sending routes to node
request := &v1.SetApprovedRoutesRequest{
NodeId: identifier,
Routes: routes,
}
resp, err := client.SetApprovedRoutes(ctx, request)
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error while sending routes to headscale: %s", err),
output,
)
return
}
if resp != nil {
SuccessOutput(
resp.GetNode(),
"Node updated",
output,
)
}
},
}


@@ -6,7 +6,6 @@ import (
"os"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
@@ -20,12 +19,6 @@ func init() {
log.Fatal().Err(err).Msg("")
}
policyCmd.AddCommand(setPolicy)
checkPolicy.Flags().StringP("file", "f", "", "Path to a policy file in HuJSON format")
if err := checkPolicy.MarkFlagRequired("file"); err != nil {
log.Fatal().Err(err).Msg("")
}
policyCmd.AddCommand(checkPolicy)
}
var policyCmd = &cobra.Command{
@@ -92,30 +85,3 @@ var setPolicy = &cobra.Command{
SuccessOutput(nil, "Policy updated.", "")
},
}
var checkPolicy = &cobra.Command{
Use: "check",
Short: "Check the Policy file for errors",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
policyPath, _ := cmd.Flags().GetString("file")
f, err := os.Open(policyPath)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error opening the policy file: %s", err), output)
}
defer f.Close()
policyBytes, err := io.ReadAll(f)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error reading the policy file: %s", err), output)
}
_, err = policy.NewPolicyManager(policyBytes, nil, nil)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error parsing the policy file: %s", err), output)
}
SuccessOutput(nil, "Policy is valid", "")
},
}


@@ -20,7 +20,7 @@ const (
func init() {
rootCmd.AddCommand(preauthkeysCmd)
preauthkeysCmd.PersistentFlags().Uint64P("user", "u", 0, "User identifier (ID)")
preauthkeysCmd.PersistentFlags().StringP("user", "u", "", "User")
preauthkeysCmd.PersistentFlags().StringP("namespace", "n", "", "User")
pakNamespaceFlag := preauthkeysCmd.PersistentFlags().Lookup("namespace")
@@ -57,7 +57,7 @@ var listPreAuthKeys = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
@@ -112,7 +112,7 @@ var listPreAuthKeys = &cobra.Command{
aclTags = strings.TrimLeft(aclTags, ",")
tableData = append(tableData, []string{
strconv.FormatUint(key.GetId(), 10),
key.GetId(),
key.GetKey(),
strconv.FormatBool(key.GetReusable()),
strconv.FormatBool(key.GetEphemeral()),
@@ -141,7 +141,7 @@ var createPreAuthKeyCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}
@@ -206,7 +206,7 @@ var expirePreAuthKeyCmd = &cobra.Command{
},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
user, err := cmd.Flags().GetUint64("user")
user, err := cmd.Flags().GetString("user")
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error getting user: %s", err), output)
}


@@ -4,7 +4,6 @@ import (
"fmt"
"os"
"runtime"
"slices"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/rs/zerolog"
@@ -26,11 +25,6 @@ func init() {
return
}
if slices.Contains(os.Args, "policy") && slices.Contains(os.Args, "check") {
zerolog.SetGlobalLevel(zerolog.Disabled)
return
}
cobra.OnInitialize(initConfig)
rootCmd.PersistentFlags().
StringVarP(&cfgFile, "config", "c", "", "config file (default is /etc/headscale/config.yaml)")
@@ -64,26 +58,26 @@ func initConfig() {
zerolog.SetGlobalLevel(zerolog.Disabled)
}
logFormat := viper.GetString("log.format")
if logFormat == types.JSONLogFormat {
log.Logger = log.Output(os.Stdout)
}
// logFormat := viper.GetString("log.format")
// if logFormat == types.JSONLogFormat {
// log.Logger = log.Output(os.Stdout)
// }
disableUpdateCheck := viper.GetBool("disable_check_updates")
if !disableUpdateCheck && !machineOutput {
if (runtime.GOOS == "linux" || runtime.GOOS == "darwin") &&
types.Version != "dev" {
Version != "dev" {
githubTag := &latest.GithubTag{
Owner: "juanfont",
Repository: "headscale",
}
res, err := latest.Check(githubTag, types.Version)
res, err := latest.Check(githubTag, Version)
if err == nil && res.Outdated {
//nolint
log.Warn().Msgf(
"An updated version of Headscale has been found (%s vs. your current %s). Check it out https://github.com/juanfont/headscale/releases\n",
res.Current,
types.Version,
Version,
)
}
}

cmd/headscale/cli/routes.go (new file, 271 lines)

@@ -0,0 +1,271 @@
package cli
import (
"fmt"
"log"
"net/netip"
"strconv"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"google.golang.org/grpc/status"
"tailscale.com/net/tsaddr"
)
const (
Base10 = 10
)
func init() {
rootCmd.AddCommand(routesCmd)
listRoutesCmd.Flags().Uint64P("identifier", "i", 0, "Node identifier (ID)")
routesCmd.AddCommand(listRoutesCmd)
enableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
err := enableRouteCmd.MarkFlagRequired("route")
if err != nil {
log.Fatal(err.Error())
}
routesCmd.AddCommand(enableRouteCmd)
disableRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
err = disableRouteCmd.MarkFlagRequired("route")
if err != nil {
log.Fatal(err.Error())
}
routesCmd.AddCommand(disableRouteCmd)
deleteRouteCmd.Flags().Uint64P("route", "r", 0, "Route identifier (ID)")
err = deleteRouteCmd.MarkFlagRequired("route")
if err != nil {
log.Fatal(err.Error())
}
routesCmd.AddCommand(deleteRouteCmd)
}
var routesCmd = &cobra.Command{
Use: "routes",
Short: "Manage the routes of Headscale",
Aliases: []string{"r", "route"},
}
var listRoutesCmd = &cobra.Command{
Use: "list",
Short: "List all routes",
Aliases: []string{"ls", "show"},
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
machineID, err := cmd.Flags().GetUint64("identifier")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine id from flag: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
var routes []*v1.Route
if machineID == 0 {
response, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot get nodes: %s", status.Convert(err).Message()),
output,
)
}
if output != "" {
SuccessOutput(response.GetRoutes(), "", output)
}
routes = response.GetRoutes()
} else {
response, err := client.GetNodeRoutes(ctx, &v1.GetNodeRoutesRequest{
NodeId: machineID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot get routes for node %d: %s", machineID, status.Convert(err).Message()),
output,
)
}
if output != "" {
SuccessOutput(response.GetRoutes(), "", output)
}
routes = response.GetRoutes()
}
tableData := routesToPtables(routes)
if err != nil {
ErrorOutput(err, fmt.Sprintf("Error converting to table: %s", err), output)
}
err = pterm.DefaultTable.WithHasHeader().WithData(tableData).Render()
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Failed to render pterm table: %s", err),
output,
)
}
},
}
var enableRouteCmd = &cobra.Command{
Use: "enable",
Short: "Set a route as enabled",
Long: `This command will make as enabled a given route.`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
routeID, err := cmd.Flags().GetUint64("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine id from flag: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{
RouteId: routeID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot enable route %d: %s", routeID, status.Convert(err).Message()),
output,
)
}
if output != "" {
SuccessOutput(response, "", output)
}
},
}
var disableRouteCmd = &cobra.Command{
Use: "disable",
Short: "Set as disabled a given route",
Long: `This command will make as disabled a given route.`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
routeID, err := cmd.Flags().GetUint64("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine id from flag: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.DisableRoute(ctx, &v1.DisableRouteRequest{
RouteId: routeID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot disable route %d: %s", routeID, status.Convert(err).Message()),
output,
)
}
if output != "" {
SuccessOutput(response, "", output)
}
},
}
var deleteRouteCmd = &cobra.Command{
Use: "delete",
Short: "Delete a given route",
Long: `This command will delete a given route.`,
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
routeID, err := cmd.Flags().GetUint64("route")
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Error getting machine id from flag: %s", err),
output,
)
}
ctx, client, conn, cancel := newHeadscaleCLIWithConfig()
defer cancel()
defer conn.Close()
response, err := client.DeleteRoute(ctx, &v1.DeleteRouteRequest{
RouteId: routeID,
})
if err != nil {
ErrorOutput(
err,
fmt.Sprintf("Cannot delete route %d: %s", routeID, status.Convert(err).Message()),
output,
)
}
if output != "" {
SuccessOutput(response, "", output)
}
},
}
// routesToPtables converts the list of routes to a nice table.
func routesToPtables(routes []*v1.Route) pterm.TableData {
tableData := pterm.TableData{{"ID", "Node", "Prefix", "Advertised", "Enabled", "Primary"}}
for _, route := range routes {
var isPrimaryStr string
prefix, err := netip.ParsePrefix(route.GetPrefix())
if err != nil {
log.Printf("Error parsing prefix %s: %s", route.GetPrefix(), err)
continue
}
if tsaddr.IsExitRoute(prefix) {
isPrimaryStr = "-"
} else {
isPrimaryStr = strconv.FormatBool(route.GetIsPrimary())
}
var nodeName string
if route.GetNode() != nil {
nodeName = route.GetNode().GetGivenName()
}
tableData = append(tableData,
[]string{
strconv.FormatUint(route.GetId(), Base10),
nodeName,
route.GetPrefix(),
strconv.FormatBool(route.GetAdvertised()),
strconv.FormatBool(route.GetEnabled()),
isPrimaryStr,
})
}
return tableData
}


@@ -27,14 +27,14 @@ func newHeadscaleServerWithConfig() (*hscontrol.Headscale, error) {
cfg, err := types.LoadServerConfig()
if err != nil {
return nil, fmt.Errorf(
"loading configuration: %w",
"failed to load configuration while creating headscale instance: %w",
err,
)
}
app, err := hscontrol.NewHeadscale(cfg)
if err != nil {
return nil, fmt.Errorf("creating new headscale: %w", err)
return nil, err
}
return app, nil


@@ -1,10 +1,11 @@
package cli
import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/spf13/cobra"
)
var Version = "dev"
func init() {
rootCmd.AddCommand(versionCmd)
}
@@ -15,9 +16,6 @@ var versionCmd = &cobra.Command{
Long: "The version of headscale.",
Run: func(cmd *cobra.Command, args []string) {
output, _ := cmd.Flags().GetString("output")
SuccessOutput(map[string]string{
"version": types.Version,
"commit": types.GitCommitHash,
}, types.Version, output)
SuccessOutput(map[string]string{"version": Version}, Version, output)
},
}


@@ -1,207 +0,0 @@
package main
import (
"context"
"fmt"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/client"
"github.com/docker/docker/errdefs"
)
// cleanupBeforeTest performs cleanup operations before running tests.
func cleanupBeforeTest(ctx context.Context) error {
if err := killTestContainers(ctx); err != nil {
return fmt.Errorf("failed to kill test containers: %w", err)
}
if err := pruneDockerNetworks(ctx); err != nil {
return fmt.Errorf("failed to prune networks: %w", err)
}
return nil
}
// cleanupAfterTest removes the test container after completion.
func cleanupAfterTest(ctx context.Context, cli *client.Client, containerID string) error {
return cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
Force: true,
})
}
// killTestContainers terminates and removes all test containers.
func killTestContainers(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
containers, err := cli.ContainerList(ctx, container.ListOptions{
All: true,
})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
removed := 0
for _, cont := range containers {
shouldRemove := false
for _, name := range cont.Names {
if strings.Contains(name, "headscale-test-suite") ||
strings.Contains(name, "hs-") ||
strings.Contains(name, "ts-") ||
strings.Contains(name, "derp-") {
shouldRemove = true
break
}
}
if shouldRemove {
// First kill the container if it's running
if cont.State == "running" {
_ = cli.ContainerKill(ctx, cont.ID, "KILL")
}
// Then remove the container with retry logic
if removeContainerWithRetry(ctx, cli, cont.ID) {
removed++
}
}
}
if removed > 0 {
fmt.Printf("Removed %d test containers\n", removed)
} else {
fmt.Println("No test containers found to remove")
}
return nil
}
// removeContainerWithRetry attempts to remove a container with exponential backoff retry logic.
func removeContainerWithRetry(ctx context.Context, cli *client.Client, containerID string) bool {
maxRetries := 3
baseDelay := 100 * time.Millisecond
for attempt := 0; attempt < maxRetries; attempt++ {
err := cli.ContainerRemove(ctx, containerID, container.RemoveOptions{
Force: true,
})
if err == nil {
return true
}
// If this is the last attempt, don't wait
if attempt == maxRetries-1 {
break
}
// Wait with exponential backoff
delay := baseDelay * time.Duration(1<<attempt)
time.Sleep(delay)
}
return false
}
// pruneDockerNetworks removes unused Docker networks.
func pruneDockerNetworks(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
report, err := cli.NetworksPrune(ctx, filters.Args{})
if err != nil {
return fmt.Errorf("failed to prune networks: %w", err)
}
if len(report.NetworksDeleted) > 0 {
fmt.Printf("Removed %d unused networks\n", len(report.NetworksDeleted))
} else {
fmt.Println("No unused networks found to remove")
}
return nil
}
// cleanOldImages removes test-related and old dangling Docker images.
func cleanOldImages(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
images, err := cli.ImageList(ctx, image.ListOptions{
All: true,
})
if err != nil {
return fmt.Errorf("failed to list images: %w", err)
}
removed := 0
for _, img := range images {
shouldRemove := false
for _, tag := range img.RepoTags {
if strings.Contains(tag, "hs-") ||
strings.Contains(tag, "headscale-integration") ||
strings.Contains(tag, "tailscale") {
shouldRemove = true
break
}
}
if len(img.RepoTags) == 0 && time.Unix(img.Created, 0).Before(time.Now().Add(-7*24*time.Hour)) {
shouldRemove = true
}
if shouldRemove {
_, err := cli.ImageRemove(ctx, img.ID, image.RemoveOptions{
Force: true,
})
if err == nil {
removed++
}
}
}
if removed > 0 {
fmt.Printf("Removed %d test images\n", removed)
} else {
fmt.Println("No test images found to remove")
}
return nil
}
// cleanCacheVolume removes the Docker volume used for Go module cache.
func cleanCacheVolume(ctx context.Context) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
volumeName := "hs-integration-go-cache"
err = cli.VolumeRemove(ctx, volumeName, true)
if err != nil {
if errdefs.IsNotFound(err) {
fmt.Printf("Go module cache volume not found: %s\n", volumeName)
} else if errdefs.IsConflict(err) {
fmt.Printf("Go module cache volume is in use and cannot be removed: %s\n", volumeName)
} else {
fmt.Printf("Failed to remove Go module cache volume %s: %v\n", volumeName, err)
}
} else {
fmt.Printf("Removed Go module cache volume: %s\n", volumeName)
}
return nil
}

View File

@@ -1,694 +0,0 @@
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/stdcopy"
"github.com/juanfont/headscale/integration/dockertestutil"
)
var (
ErrTestFailed = errors.New("test failed")
ErrUnexpectedContainerWait = errors.New("unexpected end of container wait")
ErrNoDockerContext = errors.New("no docker context found")
)
// runTestContainer executes integration tests in a Docker container.
func runTestContainer(ctx context.Context, config *RunConfig) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
runID := dockertestutil.GenerateRunID()
containerName := "headscale-test-suite-" + runID
logsDir := filepath.Join(config.LogsDir, runID)
if config.Verbose {
log.Printf("Run ID: %s", runID)
log.Printf("Container name: %s", containerName)
log.Printf("Logs directory: %s", logsDir)
}
absLogsDir, err := filepath.Abs(logsDir)
if err != nil {
return fmt.Errorf("failed to get absolute path for logs directory: %w", err)
}
const dirPerm = 0o755
if err := os.MkdirAll(absLogsDir, dirPerm); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
if config.CleanBefore {
if config.Verbose {
log.Printf("Running pre-test cleanup...")
}
if err := cleanupBeforeTest(ctx); err != nil && config.Verbose {
log.Printf("Warning: pre-test cleanup failed: %v", err)
}
}
goTestCmd := buildGoTestCommand(config)
if config.Verbose {
log.Printf("Command: %s", strings.Join(goTestCmd, " "))
}
imageName := "golang:" + config.GoVersion
if err := ensureImageAvailable(ctx, cli, imageName, config.Verbose); err != nil {
return fmt.Errorf("failed to ensure image availability: %w", err)
}
resp, err := createGoTestContainer(ctx, cli, config, containerName, absLogsDir, goTestCmd)
if err != nil {
return fmt.Errorf("failed to create container: %w", err)
}
if config.Verbose {
log.Printf("Created container: %s", resp.ID)
}
if err := cli.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil {
return fmt.Errorf("failed to start container: %w", err)
}
log.Printf("Starting test: %s", config.TestPattern)
exitCode, err := streamAndWait(ctx, cli, resp.ID)
// Ensure all containers have finished and logs are flushed before extracting artifacts
if waitErr := waitForContainerFinalization(ctx, cli, resp.ID, config.Verbose); waitErr != nil && config.Verbose {
log.Printf("Warning: failed to wait for container finalization: %v", waitErr)
}
// Extract artifacts from test containers before cleanup
if err := extractArtifactsFromContainers(ctx, resp.ID, logsDir, config.Verbose); err != nil && config.Verbose {
log.Printf("Warning: failed to extract artifacts from containers: %v", err)
}
// Always list control files regardless of test outcome
listControlFiles(logsDir)
shouldCleanup := config.CleanAfter && (!config.KeepOnFailure || exitCode == 0)
if shouldCleanup {
if config.Verbose {
log.Printf("Running post-test cleanup...")
}
if cleanErr := cleanupAfterTest(ctx, cli, resp.ID); cleanErr != nil && config.Verbose {
log.Printf("Warning: post-test cleanup failed: %v", cleanErr)
}
}
if err != nil {
return fmt.Errorf("test execution failed: %w", err)
}
if exitCode != 0 {
return fmt.Errorf("%w: exit code %d", ErrTestFailed, exitCode)
}
log.Printf("Test completed successfully!")
return nil
}
// buildGoTestCommand constructs the go test command arguments.
func buildGoTestCommand(config *RunConfig) []string {
cmd := []string{"go", "test", "./..."}
if config.TestPattern != "" {
cmd = append(cmd, "-run", config.TestPattern)
}
if config.FailFast {
cmd = append(cmd, "-failfast")
}
cmd = append(cmd, "-timeout", config.Timeout.String())
cmd = append(cmd, "-v")
return cmd
}
// createGoTestContainer creates a Docker container configured for running integration tests.
func createGoTestContainer(ctx context.Context, cli *client.Client, config *RunConfig, containerName, logsDir string, goTestCmd []string) (container.CreateResponse, error) {
pwd, err := os.Getwd()
if err != nil {
return container.CreateResponse{}, fmt.Errorf("failed to get working directory: %w", err)
}
projectRoot := findProjectRoot(pwd)
runID := dockertestutil.ExtractRunIDFromContainerName(containerName)
env := []string{
fmt.Sprintf("HEADSCALE_INTEGRATION_POSTGRES=%d", boolToInt(config.UsePostgres)),
fmt.Sprintf("HEADSCALE_INTEGRATION_RUN_ID=%s", runID),
}
containerConfig := &container.Config{
Image: "golang:" + config.GoVersion,
Cmd: goTestCmd,
Env: env,
WorkingDir: projectRoot + "/integration",
Tty: true,
Labels: map[string]string{
"hi.run-id": runID,
"hi.test-type": "test-runner",
},
}
// Get the correct Docker socket path from the current context
dockerSocketPath := getDockerSocketPath()
if config.Verbose {
log.Printf("Using Docker socket: %s", dockerSocketPath)
}
hostConfig := &container.HostConfig{
AutoRemove: false, // We'll remove manually for better control
Binds: []string{
fmt.Sprintf("%s:%s", projectRoot, projectRoot),
fmt.Sprintf("%s:/var/run/docker.sock", dockerSocketPath),
logsDir + ":/tmp/control",
},
Mounts: []mount.Mount{
{
Type: mount.TypeVolume,
Source: "hs-integration-go-cache",
Target: "/go",
},
},
}
return cli.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, containerName)
}
// streamAndWait streams container output and waits for completion.
func streamAndWait(ctx context.Context, cli *client.Client, containerID string) (int, error) {
out, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Follow: true,
})
if err != nil {
return -1, fmt.Errorf("failed to get container logs: %w", err)
}
defer out.Close()
go func() {
_, _ = io.Copy(os.Stdout, out)
}()
statusCh, errCh := cli.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)
select {
case err := <-errCh:
if err != nil {
return -1, fmt.Errorf("error waiting for container: %w", err)
}
case status := <-statusCh:
return int(status.StatusCode), nil
}
return -1, ErrUnexpectedContainerWait
}
// waitForContainerFinalization ensures all test containers have properly finished and flushed their output.
func waitForContainerFinalization(ctx context.Context, cli *client.Client, testContainerID string, verbose bool) error {
// First, get all related test containers
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
testContainers := getCurrentTestContainers(containers, testContainerID, verbose)
// Wait for all test containers to reach a final state
maxWaitTime := 10 * time.Second
checkInterval := 500 * time.Millisecond
timeout := time.After(maxWaitTime)
ticker := time.NewTicker(checkInterval)
defer ticker.Stop()
for {
select {
case <-timeout:
if verbose {
log.Printf("Timeout waiting for container finalization, proceeding with artifact extraction")
}
return nil
case <-ticker.C:
allFinalized := true
for _, testCont := range testContainers {
inspect, err := cli.ContainerInspect(ctx, testCont.ID)
if err != nil {
if verbose {
log.Printf("Warning: failed to inspect container %s: %v", testCont.name, err)
}
continue
}
// Check if container is in a final state
if !isContainerFinalized(inspect.State) {
allFinalized = false
if verbose {
log.Printf("Container %s still finalizing (state: %s)", testCont.name, inspect.State.Status)
}
break
}
}
if allFinalized {
if verbose {
log.Printf("All test containers finalized, ready for artifact extraction")
}
return nil
}
}
}
}
// isContainerFinalized checks if a container has reached a final state where logs are flushed.
func isContainerFinalized(state *container.State) bool {
// Container is finalized if it's not running and has a finish time
return !state.Running && state.FinishedAt != ""
}
// findProjectRoot locates the project root by finding the directory containing go.mod.
func findProjectRoot(startPath string) string {
current := startPath
for {
if _, err := os.Stat(filepath.Join(current, "go.mod")); err == nil {
return current
}
parent := filepath.Dir(current)
if parent == current {
return startPath
}
current = parent
}
}
// boolToInt converts a boolean to an integer for environment variables.
func boolToInt(b bool) int {
if b {
return 1
}
return 0
}
// DockerContext represents Docker context information.
type DockerContext struct {
Name string `json:"Name"`
Metadata map[string]interface{} `json:"Metadata"`
Endpoints map[string]interface{} `json:"Endpoints"`
Current bool `json:"Current"`
}
// createDockerClient creates a Docker client with context detection.
func createDockerClient() (*client.Client, error) {
contextInfo, err := getCurrentDockerContext()
if err != nil {
return client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
}
var clientOpts []client.Opt
clientOpts = append(clientOpts, client.WithAPIVersionNegotiation())
if contextInfo != nil {
if endpoints, ok := contextInfo.Endpoints["docker"]; ok {
if endpointMap, ok := endpoints.(map[string]interface{}); ok {
if host, ok := endpointMap["Host"].(string); ok {
if runConfig.Verbose {
log.Printf("Using Docker host from context '%s': %s", contextInfo.Name, host)
}
clientOpts = append(clientOpts, client.WithHost(host))
}
}
}
}
if len(clientOpts) == 1 {
clientOpts = append(clientOpts, client.FromEnv)
}
return client.NewClientWithOpts(clientOpts...)
}
// getCurrentDockerContext retrieves the current Docker context information.
func getCurrentDockerContext() (*DockerContext, error) {
cmd := exec.Command("docker", "context", "inspect")
output, err := cmd.Output()
if err != nil {
return nil, fmt.Errorf("failed to get docker context: %w", err)
}
var contexts []DockerContext
if err := json.Unmarshal(output, &contexts); err != nil {
return nil, fmt.Errorf("failed to parse docker context: %w", err)
}
if len(contexts) > 0 {
return &contexts[0], nil
}
return nil, ErrNoDockerContext
}
// getDockerSocketPath returns the correct Docker socket path for the current context.
func getDockerSocketPath() string {
// Always use the default socket path for mounting since Docker handles
// the translation to the actual socket (e.g., colima socket) internally
return "/var/run/docker.sock"
}
// ensureImageAvailable pulls the specified Docker image to ensure it's available.
func ensureImageAvailable(ctx context.Context, cli *client.Client, imageName string, verbose bool) error {
if verbose {
log.Printf("Pulling image %s...", imageName)
}
reader, err := cli.ImagePull(ctx, imageName, image.PullOptions{})
if err != nil {
return fmt.Errorf("failed to pull image %s: %w", imageName, err)
}
defer reader.Close()
if verbose {
_, err = io.Copy(os.Stdout, reader)
if err != nil {
return fmt.Errorf("failed to read pull output: %w", err)
}
} else {
_, err = io.Copy(io.Discard, reader)
if err != nil {
return fmt.Errorf("failed to read pull output: %w", err)
}
log.Printf("Image %s pulled successfully", imageName)
}
return nil
}
// listControlFiles displays the headscale test artifacts created in the control logs directory.
func listControlFiles(logsDir string) {
entries, err := os.ReadDir(logsDir)
if err != nil {
log.Printf("Logs directory: %s", logsDir)
return
}
var logFiles []string
var dataFiles []string
var dataDirs []string
for _, entry := range entries {
name := entry.Name()
// Only show headscale (hs-*) files and directories
if !strings.HasPrefix(name, "hs-") {
continue
}
if entry.IsDir() {
// Include directories (pprof, mapresponses)
if strings.Contains(name, "-pprof") || strings.Contains(name, "-mapresponses") {
dataDirs = append(dataDirs, name)
}
} else {
// Include files
switch {
case strings.HasSuffix(name, ".stderr.log") || strings.HasSuffix(name, ".stdout.log"):
logFiles = append(logFiles, name)
case strings.HasSuffix(name, ".db"):
dataFiles = append(dataFiles, name)
}
}
}
log.Printf("Test artifacts saved to: %s", logsDir)
if len(logFiles) > 0 {
log.Printf("Headscale logs:")
for _, file := range logFiles {
log.Printf(" %s", file)
}
}
if len(dataFiles) > 0 || len(dataDirs) > 0 {
log.Printf("Headscale data:")
for _, file := range dataFiles {
log.Printf(" %s", file)
}
for _, dir := range dataDirs {
log.Printf(" %s/", dir)
}
}
}
// extractArtifactsFromContainers collects container logs and files from the specific test run.
func extractArtifactsFromContainers(ctx context.Context, testContainerID, logsDir string, verbose bool) error {
cli, err := createDockerClient()
if err != nil {
return fmt.Errorf("failed to create Docker client: %w", err)
}
defer cli.Close()
// List all containers
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true})
if err != nil {
return fmt.Errorf("failed to list containers: %w", err)
}
// Get containers from the specific test run
currentTestContainers := getCurrentTestContainers(containers, testContainerID, verbose)
extractedCount := 0
for _, cont := range currentTestContainers {
// Extract container logs and tar files
if err := extractContainerArtifacts(ctx, cli, cont.ID, cont.name, logsDir, verbose); err != nil {
if verbose {
log.Printf("Warning: failed to extract artifacts from container %s (%s): %v", cont.name, cont.ID[:12], err)
}
} else {
if verbose {
log.Printf("Extracted artifacts from container %s (%s)", cont.name, cont.ID[:12])
}
extractedCount++
}
}
if verbose && extractedCount > 0 {
log.Printf("Extracted artifacts from %d containers", extractedCount)
}
return nil
}
// testContainer represents a container from the current test run.
type testContainer struct {
ID string
name string
}
// getCurrentTestContainers filters containers to only include those from the current test run.
func getCurrentTestContainers(containers []container.Summary, testContainerID string, verbose bool) []testContainer {
var testRunContainers []testContainer
// Find the test container to get its run ID label
var runID string
for _, cont := range containers {
if cont.ID == testContainerID {
if cont.Labels != nil {
runID = cont.Labels["hi.run-id"]
}
break
}
}
if runID == "" {
log.Printf("Error: test container %s missing required hi.run-id label", testContainerID[:12])
return testRunContainers
}
if verbose {
log.Printf("Looking for containers with run ID: %s", runID)
}
// Find all containers with the same run ID
for _, cont := range containers {
for _, name := range cont.Names {
containerName := strings.TrimPrefix(name, "/")
if strings.HasPrefix(containerName, "hs-") || strings.HasPrefix(containerName, "ts-") {
// Check if container has matching run ID label
if cont.Labels != nil && cont.Labels["hi.run-id"] == runID {
testRunContainers = append(testRunContainers, testContainer{
ID: cont.ID,
name: containerName,
})
if verbose {
log.Printf("Including container %s (run ID: %s)", containerName, runID)
}
}
break
}
}
}
return testRunContainers
}
// extractContainerArtifacts saves logs and tar files from a container.
func extractContainerArtifacts(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Ensure the logs directory exists
if err := os.MkdirAll(logsDir, 0755); err != nil {
return fmt.Errorf("failed to create logs directory: %w", err)
}
// Extract container logs
if err := extractContainerLogs(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
return fmt.Errorf("failed to extract logs: %w", err)
}
// Extract tar files for headscale containers only
if strings.HasPrefix(containerName, "hs-") {
if err := extractContainerFiles(ctx, cli, containerID, containerName, logsDir, verbose); err != nil {
if verbose {
log.Printf("Warning: failed to extract files from %s: %v", containerName, err)
}
// Don't fail the whole extraction if files are missing
}
}
return nil
}
// extractContainerLogs saves the stdout and stderr logs from a container to files.
func extractContainerLogs(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Get container logs
logReader, err := cli.ContainerLogs(ctx, containerID, container.LogsOptions{
ShowStdout: true,
ShowStderr: true,
Timestamps: false,
Follow: false,
Tail: "all",
})
if err != nil {
return fmt.Errorf("failed to get container logs: %w", err)
}
defer logReader.Close()
// Create log files following the headscale naming convention
stdoutPath := filepath.Join(logsDir, containerName+".stdout.log")
stderrPath := filepath.Join(logsDir, containerName+".stderr.log")
// Create buffers to capture stdout and stderr separately
var stdoutBuf, stderrBuf bytes.Buffer
// Demultiplex the Docker logs stream to separate stdout and stderr
_, err = stdcopy.StdCopy(&stdoutBuf, &stderrBuf, logReader)
if err != nil {
return fmt.Errorf("failed to demultiplex container logs: %w", err)
}
// Write stdout logs
if err := os.WriteFile(stdoutPath, stdoutBuf.Bytes(), 0644); err != nil {
return fmt.Errorf("failed to write stdout log: %w", err)
}
// Write stderr logs
if err := os.WriteFile(stderrPath, stderrBuf.Bytes(), 0644); err != nil {
return fmt.Errorf("failed to write stderr log: %w", err)
}
if verbose {
log.Printf("Saved logs for %s: %s, %s", containerName, stdoutPath, stderrPath)
}
return nil
}
// extractContainerFiles extracts database file and directories from headscale containers.
// Note: The actual file extraction is now handled by the integration tests themselves
// via SaveProfile, SaveMapResponses, and SaveDatabase functions in hsic.go
func extractContainerFiles(ctx context.Context, cli *client.Client, containerID, containerName, logsDir string, verbose bool) error {
// Files are now extracted directly by the integration tests
// This function is kept for potential future use or other file types
return nil
}
// logExtractionError logs extraction errors with appropriate level based on error type.
func logExtractionError(artifactType, containerName string, err error, verbose bool) {
if errors.Is(err, ErrFileNotFoundInTar) {
// File not found is expected and only logged in verbose mode
if verbose {
log.Printf("No %s found in container %s", artifactType, containerName)
}
} else {
// Other errors are actual failures and should be logged as warnings
log.Printf("Warning: failed to extract %s from %s: %v", artifactType, containerName, err)
}
}
// extractSingleFile copies a single file from a container.
func extractSingleFile(ctx context.Context, cli *client.Client, containerID, sourcePath, fileName, logsDir string, verbose bool) error {
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
if err != nil {
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
}
defer tarReader.Close()
// Extract the single file from the tar
filePath := filepath.Join(logsDir, fileName)
if err := extractFileFromTar(tarReader, filepath.Base(sourcePath), filePath); err != nil {
return fmt.Errorf("failed to extract file from tar: %w", err)
}
if verbose {
log.Printf("Extracted %s from %s", fileName, containerID[:12])
}
return nil
}
// extractDirectory copies a directory from a container and extracts its contents.
func extractDirectory(ctx context.Context, cli *client.Client, containerID, sourcePath, dirName, logsDir string, verbose bool) error {
tarReader, _, err := cli.CopyFromContainer(ctx, containerID, sourcePath)
if err != nil {
return fmt.Errorf("failed to copy %s from container: %w", sourcePath, err)
}
defer tarReader.Close()
// Create target directory
targetDir := filepath.Join(logsDir, dirName)
if err := os.MkdirAll(targetDir, 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
}
// Extract the directory from the tar
if err := extractDirectoryFromTar(tarReader, targetDir); err != nil {
return fmt.Errorf("failed to extract directory from tar: %w", err)
}
if verbose {
log.Printf("Extracted %s/ from %s", dirName, containerID[:12])
}
return nil
}

View File

@@ -1,351 +0,0 @@
package main
import (
"context"
"errors"
"fmt"
"log"
"os/exec"
"strings"
)
var ErrSystemChecksFailed = errors.New("system checks failed")
// DoctorResult represents the result of a single health check.
type DoctorResult struct {
Name string
Status string // "PASS", "FAIL", "WARN"
Message string
Suggestions []string
}
// runDoctorCheck performs comprehensive pre-flight checks for integration testing.
func runDoctorCheck(ctx context.Context) error {
results := []DoctorResult{}
// Check 1: Docker binary availability
results = append(results, checkDockerBinary())
// Check 2: Docker daemon connectivity
dockerResult := checkDockerDaemon(ctx)
results = append(results, dockerResult)
// If Docker is available, run additional checks
if dockerResult.Status == "PASS" {
results = append(results, checkDockerContext(ctx))
results = append(results, checkDockerSocket(ctx))
results = append(results, checkGolangImage(ctx))
}
// Check 3: Go installation
results = append(results, checkGoInstallation())
// Check 4: Git repository
results = append(results, checkGitRepository())
// Check 5: Required files
results = append(results, checkRequiredFiles())
// Display results
displayDoctorResults(results)
// Return error if any critical checks failed
for _, result := range results {
if result.Status == "FAIL" {
return fmt.Errorf("%w - see details above", ErrSystemChecksFailed)
}
}
log.Printf("✅ All system checks passed - ready to run integration tests!")
return nil
}
// checkDockerBinary verifies Docker binary is available.
func checkDockerBinary() DoctorResult {
_, err := exec.LookPath("docker")
if err != nil {
return DoctorResult{
Name: "Docker Binary",
Status: "FAIL",
Message: "Docker binary not found in PATH",
Suggestions: []string{
"Install Docker: https://docs.docker.com/get-docker/",
"For macOS: consider using colima or Docker Desktop",
"Ensure docker is in your PATH",
},
}
}
return DoctorResult{
Name: "Docker Binary",
Status: "PASS",
Message: "Docker binary found",
}
}
// checkDockerDaemon verifies Docker daemon is running and accessible.
func checkDockerDaemon(ctx context.Context) DoctorResult {
cli, err := createDockerClient()
if err != nil {
return DoctorResult{
Name: "Docker Daemon",
Status: "FAIL",
Message: fmt.Sprintf("Cannot create Docker client: %v", err),
Suggestions: []string{
"Start Docker daemon/service",
"Check Docker Desktop is running (if using Docker Desktop)",
"For colima: run 'colima start'",
"Verify DOCKER_HOST environment variable if set",
},
}
}
defer cli.Close()
_, err = cli.Ping(ctx)
if err != nil {
return DoctorResult{
Name: "Docker Daemon",
Status: "FAIL",
Message: fmt.Sprintf("Cannot ping Docker daemon: %v", err),
Suggestions: []string{
"Ensure Docker daemon is running",
"Check Docker socket permissions",
"Try: docker info",
},
}
}
return DoctorResult{
Name: "Docker Daemon",
Status: "PASS",
Message: "Docker daemon is running and accessible",
}
}
// checkDockerContext verifies Docker context configuration.
func checkDockerContext(_ context.Context) DoctorResult {
contextInfo, err := getCurrentDockerContext()
if err != nil {
return DoctorResult{
Name: "Docker Context",
Status: "WARN",
Message: "Could not detect Docker context, using default settings",
Suggestions: []string{
"Check: docker context ls",
"Consider setting up a specific context if needed",
},
}
}
if contextInfo == nil {
return DoctorResult{
Name: "Docker Context",
Status: "PASS",
Message: "Using default Docker context",
}
}
return DoctorResult{
Name: "Docker Context",
Status: "PASS",
Message: "Using Docker context: " + contextInfo.Name,
}
}
// checkDockerSocket verifies Docker socket accessibility.
func checkDockerSocket(ctx context.Context) DoctorResult {
cli, err := createDockerClient()
if err != nil {
return DoctorResult{
Name: "Docker Socket",
Status: "FAIL",
Message: fmt.Sprintf("Cannot access Docker socket: %v", err),
Suggestions: []string{
"Check Docker socket permissions",
"Add user to docker group: sudo usermod -aG docker $USER",
"For colima: ensure socket is accessible",
},
}
}
defer cli.Close()
info, err := cli.Info(ctx)
if err != nil {
return DoctorResult{
Name: "Docker Socket",
Status: "FAIL",
Message: fmt.Sprintf("Cannot get Docker info: %v", err),
Suggestions: []string{
"Check Docker daemon status",
"Verify socket permissions",
},
}
}
return DoctorResult{
Name: "Docker Socket",
Status: "PASS",
Message: fmt.Sprintf("Docker socket accessible (Server: %s)", info.ServerVersion),
}
}
// checkGolangImage verifies we can access the golang Docker image.
func checkGolangImage(ctx context.Context) DoctorResult {
cli, err := createDockerClient()
if err != nil {
return DoctorResult{
Name: "Golang Image",
Status: "FAIL",
Message: "Cannot create Docker client for image check",
}
}
defer cli.Close()
goVersion := detectGoVersion()
imageName := "golang:" + goVersion
// Check if we can pull the image
err = ensureImageAvailable(ctx, cli, imageName, false)
if err != nil {
return DoctorResult{
Name: "Golang Image",
Status: "FAIL",
Message: fmt.Sprintf("Cannot pull golang image %s: %v", imageName, err),
Suggestions: []string{
"Check internet connectivity",
"Verify Docker Hub access",
"Try: docker pull " + imageName,
},
}
}
return DoctorResult{
Name: "Golang Image",
Status: "PASS",
Message: fmt.Sprintf("Golang image %s is available", imageName),
}
}
// checkGoInstallation verifies Go is installed and working.
func checkGoInstallation() DoctorResult {
_, err := exec.LookPath("go")
if err != nil {
return DoctorResult{
Name: "Go Installation",
Status: "FAIL",
Message: "Go binary not found in PATH",
Suggestions: []string{
"Install Go: https://golang.org/dl/",
"Ensure go is in your PATH",
},
}
}
cmd := exec.Command("go", "version")
output, err := cmd.Output()
if err != nil {
return DoctorResult{
Name: "Go Installation",
Status: "FAIL",
Message: fmt.Sprintf("Cannot get Go version: %v", err),
}
}
version := strings.TrimSpace(string(output))
return DoctorResult{
Name: "Go Installation",
Status: "PASS",
Message: version,
}
}
// checkGitRepository verifies we're in a git repository.
func checkGitRepository() DoctorResult {
cmd := exec.Command("git", "rev-parse", "--git-dir")
err := cmd.Run()
if err != nil {
return DoctorResult{
Name: "Git Repository",
Status: "FAIL",
Message: "Not in a Git repository",
Suggestions: []string{
"Run from within the headscale git repository",
"Clone the repository: git clone https://github.com/juanfont/headscale.git",
},
}
}
return DoctorResult{
Name: "Git Repository",
Status: "PASS",
Message: "Running in Git repository",
}
}
// checkRequiredFiles verifies required files exist.
func checkRequiredFiles() DoctorResult {
requiredFiles := []string{
"go.mod",
"integration/",
"cmd/hi/",
}
var missingFiles []string
for _, file := range requiredFiles {
cmd := exec.Command("test", "-e", file)
if err := cmd.Run(); err != nil {
missingFiles = append(missingFiles, file)
}
}
if len(missingFiles) > 0 {
return DoctorResult{
Name: "Required Files",
Status: "FAIL",
Message: "Missing required files: " + strings.Join(missingFiles, ", "),
Suggestions: []string{
"Ensure you're in the headscale project root directory",
"Check that integration/ directory exists",
"Verify this is a complete headscale repository",
},
}
}
return DoctorResult{
Name: "Required Files",
Status: "PASS",
Message: "All required files found",
}
}
// displayDoctorResults shows the results in a formatted way.
func displayDoctorResults(results []DoctorResult) {
log.Printf("🔍 System Health Check Results")
log.Printf("================================")
for _, result := range results {
var icon string
switch result.Status {
case "PASS":
icon = "✅"
case "WARN":
icon = "⚠️"
case "FAIL":
icon = "❌"
default:
icon = "❓"
}
log.Printf("%s %s: %s", icon, result.Name, result.Message)
if len(result.Suggestions) > 0 {
for _, suggestion := range result.Suggestions {
log.Printf(" 💡 %s", suggestion)
}
}
}
log.Printf("================================")
}

View File

@@ -1,93 +0,0 @@
package main
import (
"context"
"os"
"github.com/creachadair/command"
"github.com/creachadair/flax"
)
var runConfig RunConfig
func main() {
root := command.C{
Name: "hi",
Help: "Headscale Integration test runner",
Commands: []*command.C{
{
Name: "run",
Help: "Run integration tests",
Usage: "run [test-pattern] [flags]",
SetFlags: command.Flags(flax.MustBind, &runConfig),
Run: runIntegrationTest,
},
{
Name: "doctor",
Help: "Check system requirements for running integration tests",
Run: func(env *command.Env) error {
return runDoctorCheck(env.Context())
},
},
{
Name: "clean",
Help: "Clean Docker resources",
Commands: []*command.C{
{
Name: "networks",
Help: "Prune unused Docker networks",
Run: func(env *command.Env) error {
return pruneDockerNetworks(env.Context())
},
},
{
Name: "images",
Help: "Clean old test images",
Run: func(env *command.Env) error {
return cleanOldImages(env.Context())
},
},
{
Name: "containers",
Help: "Kill all test containers",
Run: func(env *command.Env) error {
return killTestContainers(env.Context())
},
},
{
Name: "cache",
Help: "Clean Go module cache volume",
Run: func(env *command.Env) error {
return cleanCacheVolume(env.Context())
},
},
{
Name: "all",
Help: "Run all cleanup operations",
Run: func(env *command.Env) error {
return cleanAll(env.Context())
},
},
},
},
command.HelpCommand(nil),
},
}
env := root.NewEnv(nil).MergeFlags(true)
command.RunOrFail(env, os.Args[1:])
}
func cleanAll(ctx context.Context) error {
if err := killTestContainers(ctx); err != nil {
return err
}
if err := pruneDockerNetworks(ctx); err != nil {
return err
}
if err := cleanOldImages(ctx); err != nil {
return err
}
return cleanCacheVolume(ctx)
}

View File

@@ -1,122 +0,0 @@
package main
import (
"errors"
"fmt"
"log"
"os"
"path/filepath"
"time"
"github.com/creachadair/command"
)
var ErrTestPatternRequired = errors.New("test pattern is required as first argument or use --test flag")
type RunConfig struct {
TestPattern string `flag:"test,Test pattern to run"`
Timeout time.Duration `flag:"timeout,default=120m,Test timeout"`
FailFast bool `flag:"failfast,default=true,Stop on first test failure"`
UsePostgres bool `flag:"postgres,default=false,Use PostgreSQL instead of SQLite"`
GoVersion string `flag:"go-version,Go version to use (auto-detected from go.mod)"`
CleanBefore bool `flag:"clean-before,default=true,Clean resources before test"`
CleanAfter bool `flag:"clean-after,default=true,Clean resources after test"`
KeepOnFailure bool `flag:"keep-on-failure,default=false,Keep containers on test failure"`
LogsDir string `flag:"logs-dir,default=control_logs,Control logs directory"`
Verbose bool `flag:"verbose,default=false,Verbose output"`
}
// runIntegrationTest executes the integration test workflow.
func runIntegrationTest(env *command.Env) error {
args := env.Args
if len(args) > 0 && runConfig.TestPattern == "" {
runConfig.TestPattern = args[0]
}
if runConfig.TestPattern == "" {
return ErrTestPatternRequired
}
if runConfig.GoVersion == "" {
runConfig.GoVersion = detectGoVersion()
}
// Run pre-flight checks
if runConfig.Verbose {
log.Printf("Running pre-flight system checks...")
}
if err := runDoctorCheck(env.Context()); err != nil {
return fmt.Errorf("pre-flight checks failed: %w", err)
}
if runConfig.Verbose {
log.Printf("Running test: %s", runConfig.TestPattern)
log.Printf("Go version: %s", runConfig.GoVersion)
log.Printf("Timeout: %s", runConfig.Timeout)
log.Printf("Use PostgreSQL: %t", runConfig.UsePostgres)
}
return runTestContainer(env.Context(), &runConfig)
}
// detectGoVersion reads the Go version from go.mod file.
func detectGoVersion() string {
goModPath := filepath.Join("..", "..", "go.mod")
if _, err := os.Stat("go.mod"); err == nil {
goModPath = "go.mod"
} else if _, err := os.Stat("../../go.mod"); err == nil {
goModPath = "../../go.mod"
}
content, err := os.ReadFile(goModPath)
if err != nil {
return "1.24"
}
lines := splitLines(string(content))
for _, line := range lines {
if len(line) > 3 && line[:3] == "go " {
version := line[3:]
if idx := indexOf(version, " "); idx != -1 {
version = version[:idx]
}
return version
}
}
return "1.24"
}
// splitLines splits a string into lines without using strings.Split.
func splitLines(s string) []string {
var lines []string
var current string
for _, char := range s {
if char == '\n' {
lines = append(lines, current)
current = ""
} else {
current += string(char)
}
}
if current != "" {
lines = append(lines, current)
}
return lines
}
// indexOf finds the first occurrence of substr in s.
func indexOf(s, substr string) int {
for i := 0; i <= len(s)-len(substr); i++ {
if s[i:i+len(substr)] == substr {
return i
}
}
return -1
}

View File

@@ -1,101 +0,0 @@
package main
import (
"archive/tar"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
)
var (
// ErrFileNotFoundInTar indicates a file was not found in the tar archive.
ErrFileNotFoundInTar = errors.New("file not found in tar")
)
// extractFileFromTar extracts a single file from a tar reader.
func extractFileFromTar(tarReader io.Reader, fileName, outputPath string) error {
tr := tar.NewReader(tarReader)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read tar header: %w", err)
}
// Check if this is the file we're looking for
if filepath.Base(header.Name) == fileName {
if header.Typeflag == tar.TypeReg {
// Create the output file
outFile, err := os.Create(outputPath)
if err != nil {
return fmt.Errorf("failed to create output file: %w", err)
}
defer outFile.Close()
// Copy file contents
if _, err := io.Copy(outFile, tr); err != nil {
return fmt.Errorf("failed to copy file contents: %w", err)
}
return nil
}
}
}
return fmt.Errorf("%w: %s", ErrFileNotFoundInTar, fileName)
}
// extractDirectoryFromTar extracts all files from a tar reader to a target directory.
func extractDirectoryFromTar(tarReader io.Reader, targetDir string) error {
tr := tar.NewReader(tarReader)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("failed to read tar header: %w", err)
}
// Clean the path to prevent directory traversal
cleanName := filepath.Clean(header.Name)
if strings.Contains(cleanName, "..") {
continue // Skip potentially dangerous paths
}
targetPath := filepath.Join(targetDir, filepath.Base(cleanName))
switch header.Typeflag {
case tar.TypeDir:
// Create directory
if err := os.MkdirAll(targetPath, os.FileMode(header.Mode)); err != nil {
return fmt.Errorf("failed to create directory %s: %w", targetPath, err)
}
case tar.TypeReg:
// Create file
outFile, err := os.Create(targetPath)
if err != nil {
return fmt.Errorf("failed to create file %s: %w", targetPath, err)
}
if _, err := io.Copy(outFile, tr); err != nil {
outFile.Close()
return fmt.Errorf("failed to copy file contents: %w", err)
}
outFile.Close()
// Set file permissions
if err := os.Chmod(targetPath, os.FileMode(header.Mode)); err != nil {
return fmt.Errorf("failed to set file permissions: %w", err)
}
}
}
return nil
}

View File

@@ -18,8 +18,10 @@ server_url: http://127.0.0.1:8080
# listen_addr: 0.0.0.0:8080
listen_addr: 127.0.0.1:8080
# Address to listen to /metrics and /debug, you may want
# to keep this endpoint private to your internal network
# Address to listen to /metrics, you may want
# to keep this endpoint private to your internal
# network
#
metrics_listen_addr: 127.0.0.1:9090
# Address to listen for gRPC.
@@ -41,9 +43,9 @@ grpc_allow_insecure: false
# The Noise section includes specific configuration for the
# TS2021 Noise protocol
noise:
# The Noise private key is used to encrypt the traffic between headscale and
# Tailscale clients when using the new Noise-based protocol. A missing key
# will be automatically generated.
# The Noise private key is used to encrypt the
# traffic between headscale and Tailscale clients when
# using the new Noise-based protocol.
private_key_path: /var/lib/headscale/noise_private.key
# List of IP prefixes to allocate tailaddresses from.
@@ -85,17 +87,16 @@ derp:
region_code: "headscale"
region_name: "Headscale Embedded DERP"
# Only allow clients associated with this server access
verify_clients: true
# Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
# When the embedded DERP server is enabled stun_listen_addr MUST be defined.
#
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
stun_listen_addr: "0.0.0.0:3478"
# Private key used to encrypt the traffic between headscale DERP and
# Tailscale clients. A missing key will be automatically generated.
# Private key used to encrypt the traffic between headscale DERP
# and Tailscale clients.
# The private key file will be autogenerated if it's missing.
#
private_key_path: /var/lib/headscale/derp_server_private.key
# This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
@@ -273,10 +274,6 @@ dns:
# `hostname.base_domain` (e.g., _myhost.example.com_).
base_domain: example.com
# Whether to use the local DNS settings of a node (default) or override the
# local DNS settings and force the use of Headscale's DNS configuration.
override_local_dns: false
# List of DNS servers to expose to clients.
nameservers:
global:
@@ -378,6 +375,19 @@ unix_socket_permission: "0770"
# # - plain: Use plain code verifier
# # - S256: Use SHA256 hashed code verifier (default, recommended)
# method: S256
#
# # Map legacy users from pre-0.24.0 versions of headscale to the new OIDC users
# # by taking the username from the legacy user and matching it with the username
# # provided by the OIDC. This is useful when migrating from legacy users to OIDC
# # to force them to use the unique identifier from the OIDC and to give them a
# # proper display name and picture if available.
# # Note that this only works if the username of the legacy user matches the
# # username provided by the OIDC, and there is a possibility of account takeover
# # should a username have been changed with the provider.
# # When this feature is disabled, all new logins will be created as new users.
# # Note that this option will be removed in the future and should be set to false
# # on all new installations, or once all users have logged in with OIDC.
# map_legacy_users: false
# Logtail configuration
# Logtail is Tailscales logging and auditing infrastructure, it allows the control panel

View File

@@ -40,62 +40,10 @@ official releases](../setup/install/official.md) for more information.
In addition to that, you may use packages provided by the community or from distributions. Learn more in the
[installation guide using community packages](../setup/install/community.md).
For convenience, we also [build container images with headscale](../setup/install/container.md). But **please be aware that
For convenience, we also [build Docker images with headscale](../setup/install/container.md). But **please be aware that
we don't officially support deploying headscale using Docker**. On our [Discord server](https://discord.gg/c84AZQhmpx)
we have a "docker-issues" channel where you can ask for Docker-specific help to the community.
## Scaling / How many clients does Headscale support?
It depends. As often stated, Headscale is not enterprise software and our focus
is homelabbers and self-hosters. Of course, we do not prevent people from using
it in a commercial/professional setting and often get questions about scaling.
Please note that performance is not a primary consideration in the development
of Headscale, as the main audience is assumed to be users with a modest number
of devices. We focus on correctness and feature parity with Tailscale SaaS over
time.
To understand whether you might be able to use Headscale for your use case,
here are two scenarios that illustrate the central bottleneck of Headscale:
1. An environment with 1000 servers
- they rarely "move" (change their endpoints)
- new nodes are added rarely
2. An environment with 80 laptops/phones (end user devices)
- nodes move often, e.g. switching from home to office
Headscale calculates a map of all nodes that need to talk to each other;
creating this "world map" requires a lot of CPU time. When an event that
requires changes to this map happens, the whole "world" is recalculated, and a
new "world map" is created for every node in the network.
This means that under certain conditions, Headscale can likely handle 100s
of devices (maybe more), if there is _little to no change_ happening in the
network. For example, in Scenario 1, the process of computing the world map is
extremely demanding due to the size of the network, but when the map has been
created and the nodes are not changing, the Headscale instance will likely
return to a very low resource usage until the next time there is an event
requiring the new map.
In the case of Scenario 2, the process of computing the world map is less
demanding due to the smaller size of the network; however, the nodes will
likely change frequently, which leads to a constant resource usage.
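
As a rough back-of-the-envelope illustration (this is not headscale code, just arithmetic): every event produces one fresh map per node, and each map lists all of that node's peers, so the work per event grows roughly quadratically with the size of the network.

```go
package main

import "fmt"

func main() {
	// Illustrative estimate only: one map per node, each listing all other
	// nodes as peers, recomputed on every event that changes the network.
	for _, nodes := range []int{80, 1000} {
		fmt.Printf("%d nodes -> roughly %d peer entries recomputed per event\n",
			nodes, nodes*(nodes-1))
	}
}
```

With 1000 servers that is close to a million peer entries per event, which is why a mostly static network is cheap between events while a busy one keeps the CPU occupied.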
Headscale will start to struggle when the two scenarios overlap, e.g. many nodes
with frequent changes will cause the resource usage to remain constantly high.
In the worst case scenario, the queue of nodes waiting for their map will grow
to a point where Headscale will never be able to catch up, and nodes will never
learn about the current state of the world.
We expect that the performance will improve over time as we improve the code
base, but it is not a focus. In general, we will never make things faster at
the cost of less maintainable or readable code. We are a small team and have
to optimise for maintainability.
## Which database should I use?
We recommend the use of SQLite as the database for headscale:
@@ -108,9 +56,6 @@ We recommend the use of SQLite as database for headscale:
The headscale project itself does not provide a tool to migrate from PostgreSQL to SQLite. Please have a look at [the
related tools documentation](../ref/integration/tools.md) for migration tooling provided by the community.
The choice of database has little to no impact on the performance of the server;
see [Scaling / How many clients does Headscale support?](#scaling-how-many-clients-does-headscale-support) to understand how Headscale spends its resources.
## Why is my reverse proxy not working with headscale?
We don't know. We don't use reverse proxies with headscale ourselves, so we don't have any experience with them. We have
@@ -121,16 +66,3 @@ help to the community.
## Can I use headscale and tailscale on the same machine?
Running headscale on a machine that is also in the tailnet can cause problems with subnet routers, traffic relay nodes, and MagicDNS. It might work, but it is not supported.
## Why do two nodes see each other in their status, even if an ACL allows traffic only in one direction?
A frequent use case is to allow traffic only from one node to another, but not the other way around. For example, the
workstation of an administrator should be able to connect to all nodes but the nodes themselves shouldn't be able to
connect back to the administrator's node. Why do all nodes see the administrator's workstation in the output of
`tailscale status`?
This is essentially how Tailscale works. If traffic is allowed to flow in one direction, then both nodes see each other
in the output of `tailscale status`. Traffic is still filtered according to the ACL, with the exception of `tailscale
ping` which is always allowed in either direction.
See also <https://tailscale.com/kb/1087/device-visibility>.
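
As an illustration only (the user name is hypothetical and the exact user-reference syntax depends on your headscale version), a one-directional policy could look like this; the destination nodes will still list the source node in `tailscale status`, but they cannot open connections back to it:

```json
{
  "acls": [
    // one-way rule: the admin's devices may reach every node on any port;
    // no rule grants the reverse direction, so return connections are filtered
    { "action": "accept", "src": ["admin@"], "dst": ["*:*"] }
  ]
}
```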

View File

@@ -2,32 +2,27 @@
Headscale aims to implement a self-hosted, open source alternative to the Tailscale control server. Headscale's goal is
to provide self-hosters and hobbyists with an open-source server they can use for their projects and labs. This page
provides an overview of Headscale's features and compatibility with the Tailscale control server:
provides an overview of headscale's features and compatibility with the Tailscale control server:
- [x] Full "base" support of Tailscale's features
- [x] Node registration
- [x] Interactive
- [x] Pre authenticated key
- [x] [DNS](../ref/dns.md)
- [x] [DNS](https://tailscale.com/kb/1054/dns)
- [x] [MagicDNS](https://tailscale.com/kb/1081/magicdns)
- [x] [Global and restricted nameservers (split DNS)](https://tailscale.com/kb/1054/dns#nameservers)
- [x] [search domains](https://tailscale.com/kb/1054/dns#search-domains)
- [x] [Extra DNS records (Headscale only)](../ref/dns.md#setting-extra-dns-records)
- [x] [Extra DNS records (headscale only)](../ref/dns.md#setting-extra-dns-records)
- [x] [Taildrop (File Sharing)](https://tailscale.com/kb/1106/taildrop)
- [x] [Routes](../ref/routes.md)
- [x] [Subnet routers](../ref/routes.md#subnet-router)
- [x] [Exit nodes](../ref/routes.md#exit-node)
- [x] Routing advertising (including exit nodes)
- [x] Dual stack (IPv4 and IPv6)
- [x] Ephemeral nodes
- [x] Embedded [DERP server](https://tailscale.com/kb/1232/derp-servers)
- [x] Access control lists ([GitHub label "policy"](https://github.com/juanfont/headscale/labels/policy%20%F0%9F%93%9D))
- [x] ACL management via API
- [x] Some [Autogroups](https://tailscale.com/kb/1396/targets#autogroups), currently: `autogroup:internet`,
`autogroup:nonroot`, `autogroup:member`, `autogroup:tagged`
- [x] [Auto approvers](https://tailscale.com/kb/1337/acl-syntax#auto-approvers) for [subnet
routers](../ref/routes.md#automatically-approve-routes-of-a-subnet-router) and [exit
nodes](../ref/routes.md#automatically-approve-an-exit-node-with-auto-approvers)
- [x] [Tailscale SSH](https://tailscale.com/kb/1193/tailscale-ssh)
- [x] `autogroup:internet`
- [ ] `autogroup:self`
- [ ] `autogroup:member`
* [ ] Node registration using Single-Sign-On (OpenID Connect) ([GitHub label "OIDC"](https://github.com/juanfont/headscale/labels/OIDC))
- [x] Basic registration
- [x] Update user profile from identity provider
@@ -35,4 +30,3 @@ provides on overview of Headscale's feature and compatibility with the Tailscale
- [ ] OIDC groups cannot be used in ACLs
- [ ] [Funnel](https://tailscale.com/kb/1223/funnel) ([#1040](https://github.com/juanfont/headscale/issues/1040))
- [ ] [Serve](https://tailscale.com/kb/1312/serve) ([#1234](https://github.com/juanfont/headscale/issues/1921))
- [ ] [Network flow logs](https://tailscale.com/kb/1219/network-flow-logs) ([#1687](https://github.com/juanfont/headscale/issues/1687))

View File

@@ -2,8 +2,7 @@
All headscale releases are available on the [GitHub release page](https://github.com/juanfont/headscale/releases). Those
releases are available as binaries for various platforms and architectures, packages for Debian based systems and source
code archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale) and
[GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale).
code archives. Container images are available on [Docker Hub](https://hub.docker.com/r/headscale/headscale).
An Atom/RSS feed of headscale releases is available [here](https://github.com/juanfont/headscale/releases.atom).

5
docs/packaging/README.md Normal file
View File

@@ -0,0 +1,5 @@
# Packaging
We use [nFPM](https://nfpm.goreleaser.com/) to build `.deb`, `.rpm` and `.apk` packages.
This folder contains files we need to package with these releases.
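
As a rough, illustrative sketch only (not the project's actual packaging configuration; all paths, file names and metadata below are assumptions), an nFPM config ties the binary, the systemd unit and the post-install/post-remove scripts in this folder together along these lines:

```yaml
# illustrative nfpm.yaml sketch; adjust paths and metadata to the real build
name: headscale
arch: amd64
version: "0.0.1" # placeholder
maintainer: headscale maintainers
description: An open source, self-hosted implementation of the Tailscale control server.
license: BSD-3-Clause
contents:
  - src: ./headscale
    dst: /usr/bin/headscale
  - src: ./docs/packaging/headscale.systemd.service # assumed file name
    dst: /usr/lib/systemd/system/headscale.service
scripts:
  postinstall: ./docs/packaging/postinstall.sh # assumed file name
  postremove: ./docs/packaging/postremove.sh # assumed file name
```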

View File

@@ -1,4 +1,5 @@
[Unit]
After=syslog.target
After=network.target
Description=headscale coordination server for Tailscale
X-Restart-Triggers=/etc/headscale/config.yaml
@@ -13,7 +14,7 @@ Restart=always
RestartSec=5
WorkingDirectory=/var/lib/headscale
ReadWritePaths=/var/lib/headscale
ReadWritePaths=/var/lib/headscale /var/run
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_CHOWN
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_CHOWN

View File

@@ -0,0 +1,88 @@
#!/bin/sh
# Determine OS platform
# shellcheck source=/dev/null
. /etc/os-release
HEADSCALE_EXE="/usr/bin/headscale"
BSD_HIER=""
HEADSCALE_RUN_DIR="/var/run/headscale"
HEADSCALE_HOME_DIR="/var/lib/headscale"
HEADSCALE_USER="headscale"
HEADSCALE_GROUP="headscale"
HEADSCALE_SHELL="/usr/sbin/nologin"
ensure_sudo() {
if [ "$(id -u)" = "0" ]; then
echo "Sudo permissions detected"
else
echo "No sudo permission detected, please run as sudo"
exit 1
fi
}
ensure_headscale_path() {
if [ ! -f "$HEADSCALE_EXE" ]; then
echo "headscale not in default path, exiting..."
exit 1
fi
printf "Found headscale %s\n" "$HEADSCALE_EXE"
}
create_headscale_user() {
printf "PostInstall: Adding headscale user %s\n" "$HEADSCALE_USER"
useradd -s "$HEADSCALE_SHELL" -d "$HEADSCALE_HOME_DIR" -c "headscale default user" "$HEADSCALE_USER"
}
create_headscale_group() {
if command -V systemctl >/dev/null 2>&1; then
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
groupadd "$HEADSCALE_GROUP"
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
usermod -a -G "$HEADSCALE_GROUP" "$HEADSCALE_USER"
fi
if [ "$ID" = "alpine" ]; then
printf "PostInstall: Adding headscale group %s\n" "$HEADSCALE_GROUP"
addgroup "$HEADSCALE_GROUP"
printf "PostInstall: Adding headscale user %s to group %s\n" "$HEADSCALE_USER" "$HEADSCALE_GROUP"
addgroup "$HEADSCALE_USER" "$HEADSCALE_GROUP"
fi
}
create_run_dir() {
printf "PostInstall: Creating headscale run directory \n"
mkdir -p "$HEADSCALE_RUN_DIR"
printf "PostInstall: Modifying group ownership of headscale run directory \n"
chown "$HEADSCALE_USER":"$HEADSCALE_GROUP" "$HEADSCALE_RUN_DIR"
}
summary() {
echo "----------------------------------------------------------------------"
echo " headscale package has been successfully installed."
echo ""
echo " Please follow the next steps to start the software:"
echo ""
echo " sudo systemctl enable headscale"
echo " sudo systemctl start headscale"
echo ""
echo " Configuration settings can be adjusted here:"
echo " ${BSD_HIER}/etc/headscale/config.yaml"
echo ""
echo "----------------------------------------------------------------------"
}
#
# Main body of the script
#
{
ensure_sudo
ensure_headscale_path
create_headscale_user
create_headscale_group
create_run_dir
summary
}

View File

@@ -0,0 +1,15 @@
#!/bin/sh
# Determine OS platform
# shellcheck source=/dev/null
. /etc/os-release
if command -V systemctl >/dev/null 2>&1; then
echo "Stop and disable headscale service"
systemctl stop headscale >/dev/null 2>&1 || true
systemctl disable headscale >/dev/null 2>&1 || true
echo "Running daemon-reload"
systemctl daemon-reload || true
fi
echo "Removing run directory"
rm -rf "/var/run/headscale.sock"

View File

@@ -64,10 +64,10 @@ Here are the ACL's to implement the same permissions as above:
// groups are collections of users having a common scope. A user can be in multiple groups
// groups cannot be composed of groups
"groups": {
"group:boss": ["boss@"],
"group:dev": ["dev1@", "dev2@"],
"group:admin": ["admin1@"],
"group:intern": ["intern1@"]
"group:boss": ["boss"],
"group:dev": ["dev1", "dev2"],
"group:admin": ["admin1"],
"group:intern": ["intern1"]
},
// tagOwners in tailscale is an association between a TAG and the people allowed to set this TAG on a server.
// This is documented [here](https://tailscale.com/kb/1068/acl-tags#defining-a-tag)
@@ -149,11 +149,13 @@ Here are the ACL's to implement the same permissions as above:
},
// developers have access to the internal network through the router.
// the internal network is composed of HTTPS endpoints and Postgresql
// database servers.
// database servers. There's an additional rule to allow traffic to be
// forwarded to the internal subnet, 10.20.0.0/16. See this issue
// https://github.com/juanfont/headscale/issues/502
{
"action": "accept",
"src": ["group:dev"],
"dst": ["10.20.0.0/16:443,5432"]
"dst": ["10.20.0.0/16:443,5432", "router.internal:0"]
},
// servers should be able to talk to database in tcp/5432. Database should not be able to initiate connections to
@@ -179,11 +181,11 @@ Here are the ACL's to implement the same permissions as above:
// We still have to allow internal users communications since nothing guarantees that each user have
// their own users.
{ "action": "accept", "src": ["boss@"], "dst": ["boss@:*"] },
{ "action": "accept", "src": ["dev1@"], "dst": ["dev1@:*"] },
{ "action": "accept", "src": ["dev2@"], "dst": ["dev2@:*"] },
{ "action": "accept", "src": ["admin1@"], "dst": ["admin1@:*"] },
{ "action": "accept", "src": ["intern1@"], "dst": ["intern1@:*"] }
{ "action": "accept", "src": ["boss"], "dst": ["boss:*"] },
{ "action": "accept", "src": ["dev1"], "dst": ["dev1:*"] },
{ "action": "accept", "src": ["dev2"], "dst": ["dev2:*"] },
{ "action": "accept", "src": ["admin1"], "dst": ["admin1:*"] },
{ "action": "accept", "src": ["intern1"], "dst": ["intern1:*"] }
]
}
```

View File

@@ -5,9 +5,7 @@
- `/etc/headscale`
- `$HOME/.headscale`
- the current working directory
- To load the configuration from a different path, use:
- the command line flag `-c`, `--config`
- the environment variable `HEADSCALE_CONFIG`
- Use the command line flag `-c`, `--config` to load the configuration from a different path
- Validate the configuration file with: `headscale configtest`
!!! example "Get the [example configuration from the GitHub repository](https://github.com/juanfont/headscale/blob/main/config-example.yaml)"
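For example, combining the `--config` flag with `headscale configtest` validates a configuration stored outside the default search paths (the path below is purely illustrative):

```shell
# Validate a configuration file at a non-default location.
# /srv/headscale/config.yaml is an example path, not a headscale default.
headscale --config /srv/headscale/config.yaml configtest
```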

View File

@@ -9,10 +9,10 @@ Headscale allows to set extra DNS records which are made available via
[MagicDNS](https://tailscale.com/kb/1081/magicdns). Extra DNS records can be configured either via static entries in the
[configuration file](./configuration.md) or from a JSON file that Headscale continuously watches for changes:
- Use the `dns.extra_records` option in the [configuration file](./configuration.md) for entries that are static and
* Use the `dns.extra_records` option in the [configuration file](./configuration.md) for entries that are static and
don't change while Headscale is running. Those entries are processed when Headscale is starting up and changes to the
configuration require a restart of Headscale.
- For dynamic DNS records that may be added, updated or removed while Headscale is running or DNS records that are
* For dynamic DNS records that may be added, updated or removed while Headscale is running or DNS records that are
generated by scripts the option `dns.extra_records_path` in the [configuration file](./configuration.md) is useful.
Set it to the absolute path of the JSON file containing DNS records and Headscale processes this file as it detects
changes.
@@ -25,6 +25,7 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
Currently, [only A and AAAA records are processed by Tailscale](https://github.com/tailscale/tailscale/blob/v1.78.3/ipn/ipnlocal/local.go#L4461-L4479).
1. Configure extra DNS records using one of the available configuration options:
=== "Static entries, via `dns.extra_records`"
@@ -75,14 +76,14 @@ hostname and port combination "http://hostname-in-magic-dns.myvpn.example.com:30
=== "Query with dig"
```console
```shell
dig +short grafana.myvpn.example.com
100.64.0.3
```
=== "Query with drill"
```console
```shell
drill -Q grafana.myvpn.example.com
100.64.0.3
```
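For the dynamic option described earlier (`dns.extra_records_path`), a script can simply rewrite the watched JSON file. The sketch below assumes the file lives at `/var/lib/headscale/extra-records.json` and uses the same name/type/value layout as the static entries; verify both against your headscale version:

```shell
# Write a JSON records file that headscale watches for changes.
# Path and record layout are assumptions; adjust to match your configuration.
cat > /var/lib/headscale/extra-records.json <<'EOF'
[
  {
    "name": "grafana.myvpn.example.com",
    "type": "A",
    "value": "100.64.0.3"
  }
]
EOF
```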

51
docs/ref/exit-node.md Normal file
View File

@@ -0,0 +1,51 @@
# Exit Nodes
## On the node
Register the node and make it advertise itself as an exit node:
```console
$ sudo tailscale up --login-server https://headscale.example.com --advertise-exit-node
```
If the node is already registered, it can advertise exit capabilities like this:
```console
$ sudo tailscale set --advertise-exit-node
```
To use a node as an exit node, IP forwarding must be enabled on the node. Check the official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to enable IP forwarding.
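On a typical Linux host this amounts to two sysctls. The snippet below is a common, general-purpose example and not a headscale-specific requirement beyond what the linked documentation describes:

```shell
# Enable IPv4 and IPv6 forwarding persistently (typical Linux setup).
echo 'net.ipv4.ip_forward = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
echo 'net.ipv6.conf.all.forwarding = 1' | sudo tee -a /etc/sysctl.d/99-tailscale.conf
sudo sysctl -p /etc/sysctl.d/99-tailscale.conf
```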
## On the control server
```console
$ # list nodes
$ headscale routes list
ID | Node | Prefix | Advertised | Enabled | Primary
1 | | 0.0.0.0/0 | false | false | -
2 | | ::/0 | false | false | -
3 | phobos | 0.0.0.0/0 | true | false | -
4 | phobos | ::/0 | true | false | -
$ # enable routes for phobos
$ headscale routes enable -r 3
$ headscale routes enable -r 4
$ # Check node list again. The routes are now enabled.
$ headscale routes list
ID | Node | Prefix | Advertised | Enabled | Primary
1 | | 0.0.0.0/0 | false | false | -
2 | | ::/0 | false | false | -
3 | phobos | 0.0.0.0/0 | true | true | -
4 | phobos | ::/0 | true | true | -
```
## On the client
The exit node can now be used with:
```console
$ sudo tailscale set --exit-node phobos
```
Check the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for how to do it on your device.

View File

@@ -5,11 +5,9 @@
This page contains community contributions. The projects listed here are not
maintained by the headscale authors and are written by community members.
This page collects third-party tools, client libraries, and scripts related to headscale.
This page collects third-party tools and scripts related to headscale.
| Name | Repository Link | Description |
| --------------------- | --------------------------------------------------------------- | -------------------------------------------------------------------- |
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |
| headscale-pf | [Github](https://github.com/YouSysAdmin/headscale-pf) | Populates user groups based on user groups in Jumpcloud or Authentik |
| headscale-client-go | [Github](https://github.com/hibare/headscale-client-go) | A Go client implementation for the Headscale HTTP API. |
| Name | Repository Link | Description |
| --------------------- | --------------------------------------------------------------- | ------------------------------------------------- |
| tailscale-manager | [Github](https://github.com/singlestore-labs/tailscale-manager) | Dynamically manage Tailscale route advertisements |
| headscalebacktosqlite | [Github](https://github.com/bigbozza/headscalebacktosqlite) | Migrate headscale from PostgreSQL back to SQLite |

View File

@@ -7,14 +7,13 @@
Headscale doesn't provide a built-in web interface but users may pick one from the available options.
| Name | Repository Link | Description |
| ---------------------- | ----------------------------------------------------------- | -------------------------------------------------------------------------------------------- |
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
| unraid-headscale-admin | [Github](https://github.com/ich777/unraid-headscale-admin) | A simple headscale admin UI for Unraid, it offers Local (`docker exec`) and API Mode |
| headscale-console | [Github](https://github.com/rickli-cloud/headscale-console) | WebAssembly-based client supporting SSH, VNC and RDP with optional self-service capabilities |
| Name | Repository Link | Description |
| --------------- | ------------------------------------------------------- | ----------------------------------------------------------------------------------- |
| headscale-webui | [Github](https://github.com/ifargle/headscale-webui) | A simple headscale web UI for small-scale deployments. |
| headscale-ui | [Github](https://github.com/gurucomputing/headscale-ui) | A web frontend for the headscale Tailscale-compatible coordination server |
| HeadscaleUi | [GitHub](https://github.com/simcu/headscale-ui) | A static headscale admin ui, no backend environment required |
| Headplane | [GitHub](https://github.com/tale/headplane) | An advanced Tailscale inspired frontend for headscale |
| headscale-admin | [Github](https://github.com/GoodiesHQ/headscale-admin) | Headscale-Admin is meant to be a simple, modern web interface for headscale |
| ouroboros | [Github](https://github.com/yellowsink/ouroboros) | Ouroboros is designed for users to manage their own devices, rather than for admins |
You can ask for support on our [Discord server](https://discord.gg/c84AZQhmpx) in the "web-interfaces" channel.

View File

@@ -56,6 +56,12 @@ oidc:
# - plain: Use plain code verifier
# - S256: Use SHA256 hashed code verifier (default, recommended)
method: S256
# If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
# This will transform `first-name.last-name@example.com` to the user `first-name.last-name`.
# If `strip_email_domain` is set to `false`, the domain part will NOT be removed, resulting in the following
# user: `first-name.last-name.example.com`
strip_email_domain: true
```
## Azure AD example
@@ -177,45 +183,3 @@ However if you don't have a domain, or need to add users outside of your domain,
```
You can also use `allowed_domains` and `allowed_users` to restrict the users who can authenticate.
## Authelia
Since v4.39.0, Authelia has removed most claims from the `ID Token`; they are still available when the application queries the [UserInfo Endpoint](https://openid.net/specs/openid-connect-core-1_0.html#UserInfo).
The following config restores sending the 'default' claims in the `ID Token`.
For more information, please read: [Authelia restore functionality prior to claims parameter](https://www.authelia.com/integration/openid-connect/openid-connect-1.0-claims/#restore-functionality-prior-to-claims-parameter)
```yaml
identity_providers:
oidc:
claims_policies:
default:
id_token:
[
"groups",
"email",
"email_verified",
"alt_emails",
"preferred_username",
"name",
]
clients:
- client_id: "headscale"
client_name: "headscale"
client_secret: ""
public: false
claims_policy: "default"
authorization_policy: "two_factor"
require_pkce: true
pkce_challenge_method: "S256"
redirect_uris:
- "https://headscale.example.com/oidc/callback"
scopes:
- "openid"
- "profile"
- "groups"
- "email"
userinfo_signed_response_alg: "none"
token_endpoint_auth_method: "client_secret_basic"
```

View File

@@ -1,271 +0,0 @@
# Routes
Headscale supports route advertising and can be used to manage [subnet routers](https://tailscale.com/kb/1019/subnets)
and [exit nodes](https://tailscale.com/kb/1103/exit-nodes) for a tailnet.
- [Subnet routers](#subnet-router) may be used to connect an existing network such as a virtual
private cloud or an on-premise network with your tailnet. Use a subnet router to access devices where Tailscale can't
be installed or to gradually rollout Tailscale.
- [Exit nodes](#exit-node) can be used to route all Internet traffic for another Tailscale
node. Use it to securely access the Internet on an untrusted Wi-Fi or to access online services that expect traffic
from a specific IP address.
## Subnet router
The setup of a subnet router requires double opt-in, once from a subnet router and once on the control server to allow
its use within the tailnet. Optionally, use [`autoApprovers` to automatically approve routes from a subnet
router](#automatically-approve-routes-of-a-subnet-router).
### Setup a subnet router
#### Configure a node as subnet router
Register a node and advertise the routes it should handle as a comma-separated list:
```console
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-routes=10.0.0.0/8,192.168.0.0/24
```
If the node is already registered, it can advertise new routes or update previously announced routes with:
```console
$ sudo tailscale set --advertise-routes=10.0.0.0/8,192.168.0.0/24
```
Finally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.
#### Enable the subnet router on the control server
The routes of a tailnet can be displayed with the `headscale nodes list-routes` command. A subnet router with the
hostname `myrouter` announced the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24`. Those need to be approved before they
can be used.
```console
$ headscale nodes list-routes
ID | Hostname | Approved | Available | Serving (Primary)
1 | myrouter | | 10.0.0.0/8, 192.168.0.0/24 |
```
Approve all desired routes of a subnet router by specifying them as a comma-separated list:
```console
$ headscale nodes approve-routes --identifier 1 --routes 10.0.0.0/8,192.168.0.0/24
Node updated
```
The node `myrouter` can now route the IPv4 networks `10.0.0.0/8` and `192.168.0.0/24` for the tailnet.
```console
$ headscale nodes list-routes
ID | Hostname | Approved | Available | Serving (Primary)
1 | myrouter | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24 | 10.0.0.0/8, 192.168.0.0/24
```
#### Use the subnet router
To accept routes advertised by a subnet router on a node:
```console
$ sudo tailscale set --accept-routes
```
Please refer to the official [Tailscale
documentation](https://tailscale.com/kb/1019/subnets#use-your-subnet-routes-from-other-devices) for how to use a subnet
router on different operating systems.
### Restrict the use of a subnet router with ACL
The routes announced by subnet routers are available to the nodes in a tailnet. By default, without an ACL enabled, all
nodes can accept and use such routes. Configure an ACL to explicitly manage who can use routes.
The ACL snippet below defines three hosts, a subnet router `router`, a regular node `node` and `service.example.net` as
internal service that can be reached via a route on the subnet router `router`. It allows the node `node` to access
`service.example.net` on port 80 and 443 which is reachable via the subnet router. Access to the subnet router itself is
denied.
```json title="Access the routes of a subnet router without the subnet router itself"
{
"hosts": {
// the router is not referenced but announces 192.168.0.0/24
"router": "100.64.0.1/32",
"node": "100.64.0.2/32",
"service.example.net": "192.168.0.1/32"
},
"acls": [
{
"action": "accept",
"src": ["node"],
"dst": ["service.example.net:80,443"]
}
]
}
```
### Automatically approve routes of a subnet router
The initial setup of a subnet router usually requires manual approval of its announced routes on the control server
before they can be used by a node in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the
approval of routes served with a subnet router.
The ACL snippet below defines the tag `tag:router` owned by the user `alice`. This tag is used for `routes` in the
`autoApprovers` section. The IPv4 route `192.168.0.0/24` is automatically approved once announced by a subnet router
owned by the user `alice` and that also advertises the tag `tag:router`.
```json title="Subnet routers owned by alice and tagged with tag:router are automatically approved"
{
"tagOwners": {
"tag:router": ["alice@"]
},
"autoApprovers": {
"routes": {
"192.168.0.0/24": ["tag:router"]
}
},
"acls": [
// more rules
]
}
```
Advertise the route `192.168.0.0/24` from a subnet router that also advertises the tag `tag:router` when joining the tailnet:
```console
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:router --advertise-routes 192.168.0.0/24
```
Please see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more
information on auto approvers.
## Exit node
The setup of an exit node requires double opt-in, once from an exit node and once on the control server to allow its use
within the tailnet. Optionally, use [`autoApprovers` to automatically approve an exit
node](#automatically-approve-an-exit-node-with-auto-approvers).
### Setup an exit node
#### Configure a node as exit node
Register a node and make it advertise itself as an exit node:
```console
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-exit-node
```
If the node is already registered, it can advertise exit capabilities like this:
```console
$ sudo tailscale set --advertise-exit-node
```
Finally, [enable IP forwarding](#enable-ip-forwarding) to route traffic.
#### Enable the exit node on the control server
The routes of a tailnet can be displayed with the `headscale nodes list-routes` command. An exit node can be recognized
by its announced routes: `0.0.0.0/0` for IPv4 and `::/0` for IPv6. The exit node with the hostname `myexit` is already
available, but needs to be approved:
```console
$ headscale nodes list-routes
ID | Hostname | Approved | Available | Serving (Primary)
1 | myexit | | 0.0.0.0/0, ::/0 |
```
For exit nodes, it is sufficient to approve either the IPv4 or IPv6 route. The other will be approved automatically.
```console
$ headscale nodes approve-routes --identifier 1 --routes 0.0.0.0/0
Node updated
```
The node `myexit` is now approved as exit node for the tailnet:
```console
$ headscale nodes list-routes
ID | Hostname | Approved | Available | Serving (Primary)
1 | myexit | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0 | 0.0.0.0/0, ::/0
```
#### Use the exit node
The exit node can now be used on a node with:
```console
$ sudo tailscale set --exit-node myexit
```
Please refer to the official [Tailscale documentation](https://tailscale.com/kb/1103/exit-nodes#use-the-exit-node) for
how to use an exit node on different operating systems.
### Restrict the use of an exit node with ACL
An exit node is offered to all nodes in a tailnet. By default, without an ACL enabled, all nodes in a tailnet can select
and use an exit node. Configure `autogroup:internet` in an ACL rule to restrict who can use _any_ of the available exit
nodes.
```json title="Example use of autogroup:internet"
{
"acls": [
{
"action": "accept",
"src": ["..."],
"dst": ["autogroup:internet:*"]
}
]
}
```
### Automatically approve an exit node with auto approvers
The initial setup of an exit node usually requires manual approval on the control server before it can be used by a node
in a tailnet. Headscale supports the `autoApprovers` section of an ACL to automate the approval of a new exit node as
soon as it joins the tailnet.
The ACL snippet below defines the tag `tag:exit` owned by the user `alice`. This tag is used for `exitNode` in the
`autoApprovers` section. A new exit node which is owned by the user `alice` and that also advertises the tag `tag:exit`
is automatically approved:
```json title="Exit nodes owned by alice and tagged with tag:exit are automatically approved"
{
"tagOwners": {
"tag:exit": ["alice@"]
},
"autoApprovers": {
"exitNode": ["tag:exit"]
},
"acls": [
// more rules
]
}
```
Advertise a node as exit node and also advertise the tag `tag:exit` when joining the tailnet:
```console
$ sudo tailscale up --login-server <YOUR_HEADSCALE_URL> --advertise-tags tag:exit --advertise-exit-node
```
Please see the [official Tailscale documentation](https://tailscale.com/kb/1337/acl-syntax#autoapprovers) for more
information on auto approvers.
## High availability
Headscale has limited support for high availability routing. Multiple subnet routers with overlapping routes or multiple
exit nodes can be used to provide high availability for users. If one router node goes offline, another one can serve
the same routes to clients. Please see the official [Tailscale documentation on high
availability](https://tailscale.com/kb/1115/high-availability#subnet-router-high-availability) for details.
!!! bug
In certain situations it might take up to 16 minutes for Headscale to detect a node as offline. If such a node
is used as a subnet router or exit node, a failover node might not be selected fast enough, causing service
interruptions for clients. See [issue 2129](https://github.com/juanfont/headscale/issues/2129) for more information.
## Troubleshooting
### Enable IP forwarding
A subnet router or exit node is routing traffic on behalf of other nodes and thus requires IP forwarding. Check the
official [Tailscale documentation](https://tailscale.com/kb/1019/subnets/?tab=linux#enable-ip-forwarding) for how to
enable IP forwarding.

View File

@@ -52,7 +52,7 @@ If you want to validate that certificate renewal completed successfully, this ca
1. Open the URL for your headscale server in your browser of choice, and manually inspecting the expiry date of the certificate you receive.
2. Or, check remotely from CLI using `openssl`:
```console
```bash
$ openssl s_client -servername [hostname] -connect [hostname]:443 | openssl x509 -noout -dates
(...)
notBefore=Feb 8 09:48:26 2024 GMT

View File

@@ -7,64 +7,65 @@
**It might be outdated and it might be missing necessary steps.**
This documentation has the goal of showing a user how to set up and run headscale in a container. A container runtime
such as [Docker](https://www.docker.com) or [Podman](https://podman.io) is required. The container image can be found on
[Docker Hub](https://hub.docker.com/r/headscale/headscale) and [GitHub Container
Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The container image URLs are:
- [Docker Hub](https://hub.docker.com/r/headscale/headscale): `docker.io/headscale/headscale:<VERSION>`
- [GitHub Container Registry](https://github.com/juanfont/headscale/pkgs/container/headscale):
`ghcr.io/juanfont/headscale:<VERSION>`
This documentation has the goal of showing a user how to set up and run headscale in a container.
[Docker](https://www.docker.com) is used as the reference container implementation, but there is no reason that it should
not work with alternatives like [Podman](https://podman.io). The Docker image can be found on Docker Hub [here](https://hub.docker.com/r/headscale/headscale).
## Configure and run headscale
1. Create a directory on the Docker host to store headscale's [configuration](../../ref/configuration.md) and the [SQLite](https://www.sqlite.org/) database:
1. Prepare a directory on the host Docker node in your directory of choice, used to hold headscale configuration and the [SQLite](https://www.sqlite.org/) database:
```shell
mkdir -p ./headscale/{config,lib,run}
mkdir -p ./headscale/config
cd ./headscale
```
1. Download the example configuration for your chosen version and save it as: `$(pwd)/config/config.yaml`. Adjust the
1. Download the example configuration for your chosen version and save it as: `/etc/headscale/config.yaml`. Adjust the
configuration to suit your local environment. See [Configuration](../../ref/configuration.md) for details.
1. Start headscale from within the previously created `./headscale` directory:
```shell
sudo mkdir -p /etc/headscale
sudo nano /etc/headscale/config.yaml
```
Alternatively, you can mount `/var/lib` and `/var/run` from your host system by adding
`--volume $(pwd)/lib:/var/lib/headscale` and `--volume $(pwd)/run:/var/run/headscale`
in the next step.
1. Start the headscale server while working in the host headscale directory:
```shell
docker run \
--name headscale \
--detach \
--volume "$(pwd)/config:/etc/headscale" \
--volume "$(pwd)/lib:/var/lib/headscale" \
--volume "$(pwd)/run:/var/run/headscale" \
--volume $(pwd)/config:/etc/headscale/ \
--publish 127.0.0.1:8080:8080 \
--publish 127.0.0.1:9090:9090 \
docker.io/headscale/headscale:<VERSION> \
headscale/headscale:<VERSION> \
serve
```
Note: use `0.0.0.0:8080:8080` instead of `127.0.0.1:8080:8080` if you want to expose the container externally.
This command mounts the local directories inside the container, forwards ports 8080 and 9090 out of the container so
the headscale instance becomes available, and then detaches so headscale runs in the background.
This command will mount `config/` under `/etc/headscale`, forward port 8080 out of the container so the
headscale instance becomes available and then detach so headscale runs in the background.
A similar configuration for `docker-compose`:
Example `docker-compose.yaml`
```yaml
version: "3.7"
```yaml title="docker-compose.yaml"
services:
headscale:
image: docker.io/headscale/headscale:<VERSION>
image: headscale/headscale:<VERSION>
restart: unless-stopped
container_name: headscale
ports:
- "127.0.0.1:8080:8080"
- "127.0.0.1:9090:9090"
volumes:
# Please set <HEADSCALE_PATH> to the absolute path
# of the previously created headscale directory.
- <HEADSCALE_PATH>/config:/etc/headscale
- <HEADSCALE_PATH>/lib:/var/lib/headscale
- <HEADSCALE_PATH>/run:/var/run/headscale
# Please change <CONFIG_PATH> to the fullpath of the config folder just created
- <CONFIG_PATH>:/etc/headscale
command: serve
```
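Assuming the file above is saved as `docker-compose.yaml` in the current directory, a typical way to bring the service up and follow its logs is:

```shell
# Start headscale in the background and tail its logs
# (use `docker-compose` instead of `docker compose` on older installations).
docker compose up -d
docker compose logs --follow headscale
```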
@@ -97,7 +98,7 @@ Registry](https://github.com/juanfont/headscale/pkgs/container/headscale). The c
### Register a machine (normal login)
On a client machine, execute the `tailscale up` command to login:
On a client machine, execute the `tailscale` login command:
```shell
tailscale up --login-server YOUR_HEADSCALE_URL
@@ -110,7 +111,7 @@ docker exec -it headscale \
headscale nodes register --user myfirstuser --key <YOUR_MACHINE_KEY>
```
### Register a machine using a pre authenticated key
### Register machine using a pre authenticated key
Generate a key using the command line:
@@ -119,7 +120,7 @@ docker exec -it headscale \
headscale preauthkeys create --user myfirstuser --reusable --expiration 24h
```
This will return a pre-authenticated key that can be used to connect a node to headscale with the `tailscale up` command:
This will return a pre-authenticated key that can be used to connect a node to headscale during the `tailscale` command:
```shell
tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
@@ -127,11 +128,11 @@ tailscale up --login-server <YOUR_HEADSCALE_URL> --authkey <YOUR_AUTH_KEY>
## Debugging headscale running in Docker
The Headscale container image is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug headscale running in the Docker container, you can use the `-debug` variant, for example `docker.io/headscale/headscale:x.x.x-debug`.
The `headscale/headscale` Docker container is based on a "distroless" image that does not contain a shell or any other debug tools. If you need to debug your application running in the Docker container, you can use the `-debug` variant, for example `headscale/headscale:x.x.x-debug`.
### Running the debug Docker container
To run the debug Docker container, use the exact same commands as above, but replace `docker.io/headscale/headscale:x.x.x` with `docker.io/headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
To run the debug Docker container, use the exact same commands as above, but replace `headscale/headscale:x.x.x` with `headscale/headscale:x.x.x-debug` (`x.x.x` is the version of headscale). The two containers are compatible with each other, so you can alternate between them.
### Executing commands in the debug container
@@ -141,14 +142,14 @@ Additionally, the debug container includes a minimalist Busybox shell.
To launch a shell in the container, use:
```shell
docker run -it docker.io/headscale/headscale:x.x.x-debug sh
```
docker run -it headscale/headscale:x.x.x-debug sh
```
You can also execute commands directly, such as `ls /ko-app` in this example:
```shell
docker run docker.io/headscale/headscale:x.x.x-debug ls /ko-app
```
docker run headscale/headscale:x.x.x-debug ls /ko-app
```
Using `docker exec -it` allows you to run commands in an existing container.
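For example, assuming the container is named `headscale` as in the examples above, listing registered nodes from a running debug container could look like this:

```shell
# Run a headscale CLI command inside the already running container.
docker exec -it headscale headscale nodes list
```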

View File

@@ -7,7 +7,7 @@ Both are available on the [GitHub releases page](https://github.com/juanfont/hea
It is recommended to use our DEB packages to install headscale on a Debian based system as those packages configure a
local user to run headscale, provide a default configuration and ship with a systemd service file. Supported
distributions are Ubuntu 22.04 or newer, Debian 11 or newer.
distributions are Ubuntu 20.04 or newer, Debian 11 or newer.
1. Download the [latest headscale package](https://github.com/juanfont/headscale/releases/latest) for your platform (`.deb` for Ubuntu and Debian).
@@ -87,8 +87,8 @@ managed by systemd.
sudo nano /etc/headscale/config.yaml
```
1. Copy [headscale's systemd service file](https://github.com/juanfont/headscale/blob/main/packaging/systemd/headscale.service)
to `/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
1. Copy [headscale's systemd service file](../../packaging/headscale.systemd.service) to
`/etc/systemd/system/headscale.service` and adjust it to suit your local setup. The following parameters likely need
to be modified: `ExecStart`, `WorkingDirectory`, `ReadWritePaths`.
1. In `/etc/headscale/config.yaml`, override the default `headscale` unix socket with a path that is writable by the

View File

@@ -17,7 +17,7 @@ README](https://github.com/juanfont/headscale#contributing) for more information
```shell
# Install prerequisites
pkg_add go git
pkg_add go
git clone https://github.com/juanfont/headscale.git
@@ -30,7 +30,7 @@ latestTag=$(git describe --tags `git rev-list --tags --max-count=1`)
git checkout $latestTag
go build -ldflags="-s -w -X github.com/juanfont/headscale/hscontrol/types.Version=$latestTag" -X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=HASH" github.com/juanfont/headscale
go build -ldflags="-s -w -X github.com/juanfont/headscale/cmd/headscale/cli.Version=$latestTag" github.com/juanfont/headscale
# make it executable
chmod a+x headscale
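# Optional sanity check (illustrative): print the version embedded in the freshly built binary
./headscale version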

6
flake.lock generated
View File

@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1746300365,
"narHash": "sha256-thYTdWqCRipwPRxWiTiH1vusLuAy0okjOyzRx4hLWh4=",
"lastModified": 1738297584,
"narHash": "sha256-AYvaFBzt8dU0fcSK2jKD0Vg23K2eIRxfsVXIPCW9a0E=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f21e4546e3ede7ae34d12a84602a22246b31f7e0",
"rev": "9189ac18287c599860e878e905da550aa6dec1cd",
"type": "github"
},
"original": {

View File

@@ -12,16 +12,17 @@
flake-utils,
...
}: let
headscaleVersion = self.shortRev or self.dirtyShortRev;
commitHash = self.rev or self.dirtyRev;
headscaleVersion =
if (self ? shortRev)
then self.shortRev
else "dev";
in
{
overlay = _: prev: let
pkgs = nixpkgs.legacyPackages.${prev.system};
buildGo = pkgs.buildGo124Module;
vendorHash = "sha256-ACab+UvKrh+7G5KXNS+Iu9y8ZExefQDhwEKgIv0iIvE=";
buildGo = pkgs.buildGo123Module;
in {
headscale = buildGo {
headscale = buildGo rec {
pname = "headscale";
version = headscaleVersion;
src = pkgs.lib.cleanSource self;
@@ -31,27 +32,11 @@
# When updating go.mod or go.sum, a new sha will need to be calculated,
# update this if you have a mismatch after doing a change to those files.
inherit vendorHash;
vendorHash = "sha256-ZQj2A0GdLhHc7JLW7qgpGBveXXNWg9ueSG47OZQQXEw=";
subPackages = ["cmd/headscale"];
ldflags = [
"-s"
"-w"
"-X github.com/juanfont/headscale/hscontrol/types.Version=${headscaleVersion}"
"-X github.com/juanfont/headscale/hscontrol/types.GitCommitHash=${commitHash}"
];
};
hi = buildGo {
pname = "hi";
version = headscaleVersion;
src = pkgs.lib.cleanSource self;
checkFlags = ["-short"];
inherit vendorHash;
subPackages = ["cmd/hi"];
ldflags = ["-s" "-w" "-X github.com/juanfont/headscale/cmd/headscale/cli.Version=v${version}"];
};
protoc-gen-grpc-gateway = buildGo rec {
@@ -93,9 +78,6 @@
# golangci-lint = prev.golangci-lint.override {
# buildGoModule = buildGo;
# };
# golangci-lint-langserver = prev.golangci-lint.override {
# buildGoModule = buildGo;
# };
goreleaser = prev.goreleaser.override {
buildGoModule = buildGo;
@@ -112,10 +94,6 @@
gofumpt = prev.gofumpt.override {
buildGoModule = buildGo;
};
gopls = prev.gopls.override {
buildGoModule = buildGo;
};
};
}
// flake-utils.lib.eachDefaultSystem
@@ -124,12 +102,11 @@
overlays = [self.overlay];
inherit system;
};
buildDeps = with pkgs; [git go_1_24 gnumake];
buildDeps = with pkgs; [git go_1_23 gnumake];
devDeps = with pkgs;
buildDeps
++ [
golangci-lint
golangci-lint-langserver
golines
nodePackages.prettier
goreleaser
@@ -137,7 +114,6 @@
gotestsum
gotests
gofumpt
gopls
ksh
ko
yq-go
@@ -156,9 +132,6 @@
buf
clang-tools # clang-format
protobuf-language-server
# Add hi to make it even easier to use ci runner.
hi
];
# Add entry to build a docker image with headscale

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/apikey.proto
@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -23,14 +22,15 @@ const (
)
type ApiKey struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
LastSeen *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"`
}
func (x *ApiKey) Reset() {
@@ -99,10 +99,11 @@ func (x *ApiKey) GetLastSeen() *timestamppb.Timestamp {
}
type CreateApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Expiration *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=expiration,proto3" json:"expiration,omitempty"`
}
func (x *CreateApiKeyRequest) Reset() {
@@ -143,10 +144,11 @@ func (x *CreateApiKeyRequest) GetExpiration() *timestamppb.Timestamp {
}
type CreateApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ApiKey string `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
}
func (x *CreateApiKeyResponse) Reset() {
@@ -187,10 +189,11 @@ func (x *CreateApiKeyResponse) GetApiKey() string {
}
type ExpireApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
}
func (x *ExpireApiKeyRequest) Reset() {
@@ -231,9 +234,9 @@ func (x *ExpireApiKeyRequest) GetPrefix() string {
}
type ExpireApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ExpireApiKeyResponse) Reset() {
@@ -267,9 +270,9 @@ func (*ExpireApiKeyResponse) Descriptor() ([]byte, []int) {
}
type ListApiKeysRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ListApiKeysRequest) Reset() {
@@ -303,10 +306,11 @@ func (*ListApiKeysRequest) Descriptor() ([]byte, []int) {
}
type ListApiKeysResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ApiKeys []*ApiKey `protobuf:"bytes,1,rep,name=api_keys,json=apiKeys,proto3" json:"api_keys,omitempty"`
}
func (x *ListApiKeysResponse) Reset() {
@@ -347,10 +351,11 @@ func (x *ListApiKeysResponse) GetApiKeys() []*ApiKey {
}
type DeleteApiKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"`
}
func (x *DeleteApiKeyRequest) Reset() {
@@ -391,9 +396,9 @@ func (x *DeleteApiKeyRequest) GetPrefix() string {
}
type DeleteApiKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DeleteApiKeyResponse) Reset() {
@@ -428,42 +433,62 @@ func (*DeleteApiKeyResponse) Descriptor() ([]byte, []int) {
var File_headscale_v1_apikey_proto protoreflect.FileDescriptor
const file_headscale_v1_apikey_proto_rawDesc = "" +
"\n" +
"\x19headscale/v1/apikey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe0\x01\n" +
"\x06ApiKey\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x16\n" +
"\x06prefix\x18\x02 \x01(\tR\x06prefix\x12:\n" +
"\n" +
"expiration\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\x129\n" +
"\n" +
"created_at\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x127\n" +
"\tlast_seen\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\"Q\n" +
"\x13CreateApiKeyRequest\x12:\n" +
"\n" +
"expiration\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\"/\n" +
"\x14CreateApiKeyResponse\x12\x17\n" +
"\aapi_key\x18\x01 \x01(\tR\x06apiKey\"-\n" +
"\x13ExpireApiKeyRequest\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
"\x14ExpireApiKeyResponse\"\x14\n" +
"\x12ListApiKeysRequest\"F\n" +
"\x13ListApiKeysResponse\x12/\n" +
"\bapi_keys\x18\x01 \x03(\v2\x14.headscale.v1.ApiKeyR\aapiKeys\"-\n" +
"\x13DeleteApiKeyRequest\x12\x16\n" +
"\x06prefix\x18\x01 \x01(\tR\x06prefix\"\x16\n" +
"\x14DeleteApiKeyResponseB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var file_headscale_v1_apikey_proto_rawDesc = []byte{
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61,
0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe0, 0x01, 0x0a, 0x06, 0x41,
0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3a, 0x0a,
0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65,
0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74,
0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65,
0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x22, 0x51, 0x0a,
0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x22, 0x2f, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f,
0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65,
0x79, 0x22, 0x2d, 0x0a, 0x13, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
0x22, 0x16, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74,
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x46,
0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79,
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x61,
0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x2d, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70,
0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41,
0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29, 0x5a,
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_headscale_v1_apikey_proto_rawDescOnce sync.Once
file_headscale_v1_apikey_proto_rawDescData []byte
file_headscale_v1_apikey_proto_rawDescData = file_headscale_v1_apikey_proto_rawDesc
)
func file_headscale_v1_apikey_proto_rawDescGZIP() []byte {
file_headscale_v1_apikey_proto_rawDescOnce.Do(func() {
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)))
file_headscale_v1_apikey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_apikey_proto_rawDescData)
})
return file_headscale_v1_apikey_proto_rawDescData
}
@@ -503,7 +528,7 @@ func file_headscale_v1_apikey_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_apikey_proto_rawDesc), len(file_headscale_v1_apikey_proto_rawDesc)),
RawDescriptor: file_headscale_v1_apikey_proto_rawDesc,
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -514,6 +539,7 @@ func file_headscale_v1_apikey_proto_init() {
MessageInfos: file_headscale_v1_apikey_proto_msgTypes,
}.Build()
File_headscale_v1_apikey_proto = out.File
file_headscale_v1_apikey_proto_rawDesc = nil
file_headscale_v1_apikey_proto_goTypes = nil
file_headscale_v1_apikey_proto_depIdxs = nil
}

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/device.proto
@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -23,11 +22,12 @@ const (
)
type Latency struct {
state protoimpl.MessageState `protogen:"open.v1"`
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
LatencyMs float32 `protobuf:"fixed32,1,opt,name=latency_ms,json=latencyMs,proto3" json:"latency_ms,omitempty"`
Preferred bool `protobuf:"varint,2,opt,name=preferred,proto3" json:"preferred,omitempty"`
}
func (x *Latency) Reset() {
@@ -75,15 +75,16 @@ func (x *Latency) GetPreferred() bool {
}
type ClientSupports struct {
state protoimpl.MessageState `protogen:"open.v1"`
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
HairPinning bool `protobuf:"varint,1,opt,name=hair_pinning,json=hairPinning,proto3" json:"hair_pinning,omitempty"`
Ipv6 bool `protobuf:"varint,2,opt,name=ipv6,proto3" json:"ipv6,omitempty"`
Pcp bool `protobuf:"varint,3,opt,name=pcp,proto3" json:"pcp,omitempty"`
Pmp bool `protobuf:"varint,4,opt,name=pmp,proto3" json:"pmp,omitempty"`
Udp bool `protobuf:"varint,5,opt,name=udp,proto3" json:"udp,omitempty"`
Upnp bool `protobuf:"varint,6,opt,name=upnp,proto3" json:"upnp,omitempty"`
}
func (x *ClientSupports) Reset() {
@@ -159,14 +160,15 @@ func (x *ClientSupports) GetUpnp() bool {
}
type ClientConnectivity struct {
state protoimpl.MessageState `protogen:"open.v1"`
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Endpoints []string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty"`
Derp string `protobuf:"bytes,2,opt,name=derp,proto3" json:"derp,omitempty"`
MappingVariesByDestIp bool `protobuf:"varint,3,opt,name=mapping_varies_by_dest_ip,json=mappingVariesByDestIp,proto3" json:"mapping_varies_by_dest_ip,omitempty"`
Latency map[string]*Latency `protobuf:"bytes,4,rep,name=latency,proto3" json:"latency,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
ClientSupports *ClientSupports `protobuf:"bytes,5,opt,name=client_supports,json=clientSupports,proto3" json:"client_supports,omitempty"`
}
func (x *ClientConnectivity) Reset() {
@@ -235,10 +237,11 @@ func (x *ClientConnectivity) GetClientSupports() *ClientSupports {
}
type GetDeviceRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *GetDeviceRequest) Reset() {
@@ -279,7 +282,10 @@ func (x *GetDeviceRequest) GetId() string {
}
type GetDeviceResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"`
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"`
@@ -300,8 +306,6 @@ type GetDeviceResponse struct {
EnabledRoutes []string `protobuf:"bytes,18,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,19,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
ClientConnectivity *ClientConnectivity `protobuf:"bytes,20,opt,name=client_connectivity,json=clientConnectivity,proto3" json:"client_connectivity,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *GetDeviceResponse) Reset() {
@@ -475,10 +479,11 @@ func (x *GetDeviceResponse) GetClientConnectivity() *ClientConnectivity {
}
type DeleteDeviceRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *DeleteDeviceRequest) Reset() {
@@ -519,9 +524,9 @@ func (x *DeleteDeviceRequest) GetId() string {
}
type DeleteDeviceResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DeleteDeviceResponse) Reset() {
@@ -555,10 +560,11 @@ func (*DeleteDeviceResponse) Descriptor() ([]byte, []int) {
}
type GetDeviceRoutesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *GetDeviceRoutesRequest) Reset() {
@@ -599,11 +605,12 @@ func (x *GetDeviceRoutesRequest) GetId() string {
}
type GetDeviceRoutesResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
}
func (x *GetDeviceRoutesResponse) Reset() {
@@ -651,11 +658,12 @@ func (x *GetDeviceRoutesResponse) GetAdvertisedRoutes() []string {
}
type EnableDeviceRoutesRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Routes []string `protobuf:"bytes,2,rep,name=routes,proto3" json:"routes,omitempty"`
}
func (x *EnableDeviceRoutesRequest) Reset() {
@@ -703,11 +711,12 @@ func (x *EnableDeviceRoutesRequest) GetRoutes() []string {
}
type EnableDeviceRoutesResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
EnabledRoutes []string `protobuf:"bytes,1,rep,name=enabled_routes,json=enabledRoutes,proto3" json:"enabled_routes,omitempty"`
AdvertisedRoutes []string `protobuf:"bytes,2,rep,name=advertised_routes,json=advertisedRoutes,proto3" json:"advertised_routes,omitempty"`
}
func (x *EnableDeviceRoutesResponse) Reset() {
@@ -756,80 +765,139 @@ func (x *EnableDeviceRoutesResponse) GetAdvertisedRoutes() []string {
var File_headscale_v1_device_proto protoreflect.FileDescriptor
const file_headscale_v1_device_proto_rawDesc = "" +
"\n" +
"\x19headscale/v1/device.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"F\n" +
"\aLatency\x12\x1d\n" +
"\n" +
"latency_ms\x18\x01 \x01(\x02R\tlatencyMs\x12\x1c\n" +
"\tpreferred\x18\x02 \x01(\bR\tpreferred\"\x91\x01\n" +
"\x0eClientSupports\x12!\n" +
"\fhair_pinning\x18\x01 \x01(\bR\vhairPinning\x12\x12\n" +
"\x04ipv6\x18\x02 \x01(\bR\x04ipv6\x12\x10\n" +
"\x03pcp\x18\x03 \x01(\bR\x03pcp\x12\x10\n" +
"\x03pmp\x18\x04 \x01(\bR\x03pmp\x12\x10\n" +
"\x03udp\x18\x05 \x01(\bR\x03udp\x12\x12\n" +
"\x04upnp\x18\x06 \x01(\bR\x04upnp\"\xe3\x02\n" +
"\x12ClientConnectivity\x12\x1c\n" +
"\tendpoints\x18\x01 \x03(\tR\tendpoints\x12\x12\n" +
"\x04derp\x18\x02 \x01(\tR\x04derp\x128\n" +
"\x19mapping_varies_by_dest_ip\x18\x03 \x01(\bR\x15mappingVariesByDestIp\x12G\n" +
"\alatency\x18\x04 \x03(\v2-.headscale.v1.ClientConnectivity.LatencyEntryR\alatency\x12E\n" +
"\x0fclient_supports\x18\x05 \x01(\v2\x1c.headscale.v1.ClientSupportsR\x0eclientSupports\x1aQ\n" +
"\fLatencyEntry\x12\x10\n" +
"\x03key\x18\x01 \x01(\tR\x03key\x12+\n" +
"\x05value\x18\x02 \x01(\v2\x15.headscale.v1.LatencyR\x05value:\x028\x01\"\"\n" +
"\x10GetDeviceRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\"\xa0\x06\n" +
"\x11GetDeviceResponse\x12\x1c\n" +
"\taddresses\x18\x01 \x03(\tR\taddresses\x12\x0e\n" +
"\x02id\x18\x02 \x01(\tR\x02id\x12\x12\n" +
"\x04user\x18\x03 \x01(\tR\x04user\x12\x12\n" +
"\x04name\x18\x04 \x01(\tR\x04name\x12\x1a\n" +
"\bhostname\x18\x05 \x01(\tR\bhostname\x12%\n" +
"\x0eclient_version\x18\x06 \x01(\tR\rclientVersion\x12)\n" +
"\x10update_available\x18\a \x01(\bR\x0fupdateAvailable\x12\x0e\n" +
"\x02os\x18\b \x01(\tR\x02os\x124\n" +
"\acreated\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\acreated\x127\n" +
"\tlast_seen\x18\n" +
" \x01(\v2\x1a.google.protobuf.TimestampR\blastSeen\x12.\n" +
"\x13key_expiry_disabled\x18\v \x01(\bR\x11keyExpiryDisabled\x124\n" +
"\aexpires\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\aexpires\x12\x1e\n" +
"\n" +
"authorized\x18\r \x01(\bR\n" +
"authorized\x12\x1f\n" +
"\vis_external\x18\x0e \x01(\bR\n" +
"isExternal\x12\x1f\n" +
"\vmachine_key\x18\x0f \x01(\tR\n" +
"machineKey\x12\x19\n" +
"\bnode_key\x18\x10 \x01(\tR\anodeKey\x12>\n" +
"\x1bblocks_incoming_connections\x18\x11 \x01(\bR\x19blocksIncomingConnections\x12%\n" +
"\x0eenabled_routes\x18\x12 \x03(\tR\renabledRoutes\x12+\n" +
"\x11advertised_routes\x18\x13 \x03(\tR\x10advertisedRoutes\x12Q\n" +
"\x13client_connectivity\x18\x14 \x01(\v2 .headscale.v1.ClientConnectivityR\x12clientConnectivity\"%\n" +
"\x13DeleteDeviceRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\"\x16\n" +
"\x14DeleteDeviceResponse\"(\n" +
"\x16GetDeviceRoutesRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\"m\n" +
"\x17GetDeviceRoutesResponse\x12%\n" +
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutes\"C\n" +
"\x19EnableDeviceRoutesRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\x12\x16\n" +
"\x06routes\x18\x02 \x03(\tR\x06routes\"p\n" +
"\x1aEnableDeviceRoutesResponse\x12%\n" +
"\x0eenabled_routes\x18\x01 \x03(\tR\renabledRoutes\x12+\n" +
"\x11advertised_routes\x18\x02 \x03(\tR\x10advertisedRoutesB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var file_headscale_v1_device_proto_rawDesc = []byte{
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x64,
0x65, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46, 0x0a, 0x07, 0x4c, 0x61,
0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79,
0x5f, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e,
0x63, 0x79, 0x4d, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65,
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72,
0x65, 0x64, 0x22, 0x91, 0x01, 0x0a, 0x0e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x75, 0x70,
0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x61, 0x69, 0x72, 0x5f, 0x70, 0x69,
0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x68, 0x61, 0x69,
0x72, 0x50, 0x69, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x69, 0x70, 0x76, 0x36,
0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x69, 0x70, 0x76, 0x36, 0x12, 0x10, 0x0a, 0x03,
0x70, 0x63, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x63, 0x70, 0x12, 0x10,
0x0a, 0x03, 0x70, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x6d, 0x70,
0x12, 0x10, 0x0a, 0x03, 0x75, 0x64, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x75,
0x64, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x70, 0x6e, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08,
0x52, 0x04, 0x75, 0x70, 0x6e, 0x70, 0x22, 0xe3, 0x02, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a,
0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x64,
0x65, 0x72, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x72, 0x70, 0x12,
0x38, 0x0a, 0x19, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x65,
0x73, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01,
0x28, 0x08, 0x52, 0x15, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x72, 0x69, 0x65,
0x73, 0x42, 0x79, 0x44, 0x65, 0x73, 0x74, 0x49, 0x70, 0x12, 0x47, 0x0a, 0x07, 0x6c, 0x61, 0x74,
0x65, 0x6e, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x2e, 0x4c, 0x61, 0x74,
0x65, 0x6e, 0x63, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6c, 0x61, 0x74, 0x65, 0x6e,
0x63, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x75, 0x70,
0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x68, 0x65,
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x1a, 0x51, 0x0a, 0x0c, 0x4c, 0x61, 0x74,
0x65, 0x6e, 0x63, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63,
0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x10,
0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64,
0x22, 0xa0, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08,
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
0x29, 0x0a, 0x10, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x75, 0x70, 0x64, 0x61, 0x74,
0x65, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73,
0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x72,
0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x12, 0x37, 0x0a, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x0a, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x6b, 0x65, 0x79,
0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64,
0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x45, 0x78, 0x70, 0x69, 0x72,
0x79, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x78, 0x70,
0x69, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x12,
0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x18, 0x0d, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12,
0x1f, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0e,
0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18,
0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4b, 0x65,
0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x3e, 0x0a, 0x1b,
0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f,
0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28,
0x08, 0x52, 0x19, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e,
0x67, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0e,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x12,
0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x6f, 0x75,
0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65,
0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10,
0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
0x12, 0x51, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52,
0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76,
0x69, 0x74, 0x79, 0x22, 0x25, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x76,
0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x28, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52,
0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6d, 0x0a, 0x17,
0x47, 0x65, 0x74, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c,
0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b,
0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75,
0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x76, 0x65, 0x72,
0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x19, 0x45,
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74,
0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
0x22, 0x70, 0x0a, 0x1a, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65,
0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25,
0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52,
0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69,
0x73, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09,
0x52, 0x10, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
0x65, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_headscale_v1_device_proto_rawDescOnce sync.Once
file_headscale_v1_device_proto_rawDescData []byte
file_headscale_v1_device_proto_rawDescData = file_headscale_v1_device_proto_rawDesc
)
func file_headscale_v1_device_proto_rawDescGZIP() []byte {
file_headscale_v1_device_proto_rawDescOnce.Do(func() {
file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)))
file_headscale_v1_device_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_device_proto_rawDescData)
})
return file_headscale_v1_device_proto_rawDescData
}
@@ -874,7 +942,7 @@ func file_headscale_v1_device_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_device_proto_rawDesc), len(file_headscale_v1_device_proto_rawDesc)),
RawDescriptor: file_headscale_v1_device_proto_rawDesc,
NumEnums: 0,
NumMessages: 12,
NumExtensions: 0,
@@ -885,6 +953,7 @@ func file_headscale_v1_device_proto_init() {
MessageInfos: file_headscale_v1_device_proto_msgTypes,
}.Build()
File_headscale_v1_device_proto = out.File
file_headscale_v1_device_proto_rawDesc = nil
file_headscale_v1_device_proto_goTypes = nil
file_headscale_v1_device_proto_depIdxs = nil
}

gen/go/headscale/v1/headscale.pb.go
View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/headscale.proto
@@ -11,7 +11,6 @@ import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
unsafe "unsafe"
)
const (
@@ -23,90 +22,291 @@ const (
var File_headscale_v1_headscale_proto protoreflect.FileDescriptor
const file_headscale_v1_headscale_proto_rawDesc = "" +
"\n" +
"\x1cheadscale/v1/headscale.proto\x12\fheadscale.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17headscale/v1/user.proto\x1a\x1dheadscale/v1/preauthkey.proto\x1a\x17headscale/v1/node.proto\x1a\x19headscale/v1/apikey.proto\x1a\x19headscale/v1/policy.proto2\xa3\x16\n" +
"\x10HeadscaleService\x12h\n" +
"\n" +
"CreateUser\x12\x1f.headscale.v1.CreateUserRequest\x1a .headscale.v1.CreateUserResponse\"\x17\x82\xd3\xe4\x93\x02\x11:\x01*\"\f/api/v1/user\x12\x80\x01\n" +
"\n" +
"RenameUser\x12\x1f.headscale.v1.RenameUserRequest\x1a .headscale.v1.RenameUserResponse\"/\x82\xd3\xe4\x93\x02)\"'/api/v1/user/{old_id}/rename/{new_name}\x12j\n" +
"\n" +
"DeleteUser\x12\x1f.headscale.v1.DeleteUserRequest\x1a .headscale.v1.DeleteUserResponse\"\x19\x82\xd3\xe4\x93\x02\x13*\x11/api/v1/user/{id}\x12b\n" +
"\tListUsers\x12\x1e.headscale.v1.ListUsersRequest\x1a\x1f.headscale.v1.ListUsersResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/user\x12\x80\x01\n" +
"\x10CreatePreAuthKey\x12%.headscale.v1.CreatePreAuthKeyRequest\x1a&.headscale.v1.CreatePreAuthKeyResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/preauthkey\x12\x87\x01\n" +
"\x10ExpirePreAuthKey\x12%.headscale.v1.ExpirePreAuthKeyRequest\x1a&.headscale.v1.ExpirePreAuthKeyResponse\"$\x82\xd3\xe4\x93\x02\x1e:\x01*\"\x19/api/v1/preauthkey/expire\x12z\n" +
"\x0fListPreAuthKeys\x12$.headscale.v1.ListPreAuthKeysRequest\x1a%.headscale.v1.ListPreAuthKeysResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/v1/preauthkey\x12}\n" +
"\x0fDebugCreateNode\x12$.headscale.v1.DebugCreateNodeRequest\x1a%.headscale.v1.DebugCreateNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17:\x01*\"\x12/api/v1/debug/node\x12f\n" +
"\aGetNode\x12\x1c.headscale.v1.GetNodeRequest\x1a\x1d.headscale.v1.GetNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18\x12\x16/api/v1/node/{node_id}\x12n\n" +
"\aSetTags\x12\x1c.headscale.v1.SetTagsRequest\x1a\x1d.headscale.v1.SetTagsResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/tags\x12\x96\x01\n" +
"\x11SetApprovedRoutes\x12&.headscale.v1.SetApprovedRoutesRequest\x1a'.headscale.v1.SetApprovedRoutesResponse\"0\x82\xd3\xe4\x93\x02*:\x01*\"%/api/v1/node/{node_id}/approve_routes\x12t\n" +
"\fRegisterNode\x12!.headscale.v1.RegisterNodeRequest\x1a\".headscale.v1.RegisterNodeResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\"\x15/api/v1/node/register\x12o\n" +
"\n" +
"DeleteNode\x12\x1f.headscale.v1.DeleteNodeRequest\x1a .headscale.v1.DeleteNodeResponse\"\x1e\x82\xd3\xe4\x93\x02\x18*\x16/api/v1/node/{node_id}\x12v\n" +
"\n" +
"ExpireNode\x12\x1f.headscale.v1.ExpireNodeRequest\x1a .headscale.v1.ExpireNodeResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1d/api/v1/node/{node_id}/expire\x12\x81\x01\n" +
"\n" +
"RenameNode\x12\x1f.headscale.v1.RenameNodeRequest\x1a .headscale.v1.RenameNodeResponse\"0\x82\xd3\xe4\x93\x02*\"(/api/v1/node/{node_id}/rename/{new_name}\x12b\n" +
"\tListNodes\x12\x1e.headscale.v1.ListNodesRequest\x1a\x1f.headscale.v1.ListNodesResponse\"\x14\x82\xd3\xe4\x93\x02\x0e\x12\f/api/v1/node\x12q\n" +
"\bMoveNode\x12\x1d.headscale.v1.MoveNodeRequest\x1a\x1e.headscale.v1.MoveNodeResponse\"&\x82\xd3\xe4\x93\x02 :\x01*\"\x1b/api/v1/node/{node_id}/user\x12\x80\x01\n" +
"\x0fBackfillNodeIPs\x12$.headscale.v1.BackfillNodeIPsRequest\x1a%.headscale.v1.BackfillNodeIPsResponse\" \x82\xd3\xe4\x93\x02\x1a\"\x18/api/v1/node/backfillips\x12p\n" +
"\fCreateApiKey\x12!.headscale.v1.CreateApiKeyRequest\x1a\".headscale.v1.CreateApiKeyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\"\x0e/api/v1/apikey\x12w\n" +
"\fExpireApiKey\x12!.headscale.v1.ExpireApiKeyRequest\x1a\".headscale.v1.ExpireApiKeyResponse\" \x82\xd3\xe4\x93\x02\x1a:\x01*\"\x15/api/v1/apikey/expire\x12j\n" +
"\vListApiKeys\x12 .headscale.v1.ListApiKeysRequest\x1a!.headscale.v1.ListApiKeysResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/apikey\x12v\n" +
"\fDeleteApiKey\x12!.headscale.v1.DeleteApiKeyRequest\x1a\".headscale.v1.DeleteApiKeyResponse\"\x1f\x82\xd3\xe4\x93\x02\x19*\x17/api/v1/apikey/{prefix}\x12d\n" +
"\tGetPolicy\x12\x1e.headscale.v1.GetPolicyRequest\x1a\x1f.headscale.v1.GetPolicyResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/api/v1/policy\x12g\n" +
"\tSetPolicy\x12\x1e.headscale.v1.SetPolicyRequest\x1a\x1f.headscale.v1.SetPolicyResponse\"\x19\x82\xd3\xe4\x93\x02\x13:\x01*\x1a\x0e/api/v1/policyB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var file_headscale_v1_headscale_proto_rawDesc = []byte{
0x0a, 0x1c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76,
0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31,
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f,
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xe9, 0x19, 0x0a,
0x10, 0x48, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x12, 0x68, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12,
0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x3a, 0x01, 0x2a, 0x22, 0x0c, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0a,
0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65,
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d,
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, 0x27, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75,
0x73, 0x65, 0x72, 0x2f, 0x7b, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x65, 0x6e,
0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x6a,
0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65,
0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c,
0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x2a, 0x11, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x75, 0x73, 0x65, 0x72, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69,
0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e,
0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80,
0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a, 0x01, 0x2a, 0x22, 0x12, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65,
0x79, 0x12, 0x87, 0x01, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41,
0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70,
0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x3a, 0x01, 0x2a,
0x22, 0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x61, 0x75, 0x74,
0x68, 0x6b, 0x65, 0x79, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x4c,
0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x24,
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b,
0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4,
0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65,
0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x12, 0x7d, 0x0a, 0x0f, 0x44, 0x65, 0x62, 0x75, 0x67,
0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43,
0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x3a,
0x01, 0x2a, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x65, 0x62, 0x75,
0x67, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x66, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64,
0x65, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47,
0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e,
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f,
0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x6e,
0x0a, 0x07, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x12, 0x1c, 0x2e, 0x68, 0x65, 0x61, 0x64,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01,
0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x74, 0x61, 0x67, 0x73, 0x12, 0x74,
0x0a, 0x0c, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21,
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65,
0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x22, 0x15, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x72, 0x65, 0x67, 0x69,
0x73, 0x74, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f,
0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x2a, 0x16, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64,
0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x76, 0x0a, 0x0a, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e,
0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1d,
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f,
0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x81, 0x01,
0x0a, 0x0a, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x2e, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e, 0x61,
0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6e,
0x61, 0x6d, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f,
0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x2f, 0x7b, 0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
0x7d, 0x12, 0x62, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x1e,
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f,
0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69,
0x73, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x14, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0e, 0x12, 0x0c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x71, 0x0a, 0x08, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64,
0x65, 0x12, 0x1d, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
0x4d, 0x6f, 0x76, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x3a, 0x01, 0x2a, 0x22, 0x1b, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
0x69, 0x64, 0x7d, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x42, 0x61, 0x63,
0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x12, 0x24, 0x2e, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b,
0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x25, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x50,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02,
0x1a, 0x22, 0x18, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2f,
0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x69, 0x70, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47,
0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65,
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73,
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02,
0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65,
0x73, 0x12, 0x7c, 0x0a, 0x0b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
0x31, 0x2e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x22, 0x20, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f, 0x7b, 0x72,
0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x12,
0x80, 0x01, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22,
0x21, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f,
0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x64, 0x69, 0x73, 0x61, 0x62,
0x6c, 0x65, 0x12, 0x7f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75,
0x74, 0x65, 0x73, 0x12, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e,
0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f,
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3,
0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f,
0x64, 0x65, 0x2f, 0x7b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x2f, 0x72, 0x6f, 0x75,
0x74, 0x65, 0x73, 0x12, 0x75, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75,
0x74, 0x65, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x21, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a,
0x19, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x2f,
0x7b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x7d, 0x12, 0x70, 0x0a, 0x0c, 0x43, 0x72,
0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65,
0x61, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x19, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x22, 0x0e, 0x2f, 0x61,
0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x12, 0x77, 0x0a, 0x0c,
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x21, 0x2e, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69,
0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45,
0x78, 0x70, 0x69, 0x72, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x3a, 0x01, 0x2a, 0x22, 0x15,
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79, 0x2f, 0x65,
0x78, 0x70, 0x69, 0x72, 0x65, 0x12, 0x6a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69,
0x4b, 0x65, 0x79, 0x73, 0x12, 0x20, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x73, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02,
0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65,
0x79, 0x12, 0x76, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65,
0x79, 0x12, 0x21, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31,
0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65,
0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19,
0x2a, 0x17, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x6b, 0x65, 0x79,
0x2f, 0x7b, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x7d, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74,
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12,
0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
0x67, 0x0a, 0x09, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1e, 0x2e, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x68,
0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x50,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x19, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x13, 0x3a, 0x01, 0x2a, 0x1a, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f,
0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f,
0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_headscale_v1_headscale_proto_goTypes = []any{
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
(*SetApprovedRoutesRequest)(nil), // 10: headscale.v1.SetApprovedRoutesRequest
(*RegisterNodeRequest)(nil), // 11: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 12: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 13: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 14: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 15: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 16: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 17: headscale.v1.BackfillNodeIPsRequest
(*CreateApiKeyRequest)(nil), // 18: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 19: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 20: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 21: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 22: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 23: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 24: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 25: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 26: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 27: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 28: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 29: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 30: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 31: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 32: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 33: headscale.v1.SetTagsResponse
(*SetApprovedRoutesResponse)(nil), // 34: headscale.v1.SetApprovedRoutesResponse
(*RegisterNodeResponse)(nil), // 35: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 36: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 37: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 38: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 39: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 40: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 41: headscale.v1.BackfillNodeIPsResponse
(*CreateApiKeyResponse)(nil), // 42: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 43: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 44: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 45: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 46: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 47: headscale.v1.SetPolicyResponse
(*CreateUserRequest)(nil), // 0: headscale.v1.CreateUserRequest
(*RenameUserRequest)(nil), // 1: headscale.v1.RenameUserRequest
(*DeleteUserRequest)(nil), // 2: headscale.v1.DeleteUserRequest
(*ListUsersRequest)(nil), // 3: headscale.v1.ListUsersRequest
(*CreatePreAuthKeyRequest)(nil), // 4: headscale.v1.CreatePreAuthKeyRequest
(*ExpirePreAuthKeyRequest)(nil), // 5: headscale.v1.ExpirePreAuthKeyRequest
(*ListPreAuthKeysRequest)(nil), // 6: headscale.v1.ListPreAuthKeysRequest
(*DebugCreateNodeRequest)(nil), // 7: headscale.v1.DebugCreateNodeRequest
(*GetNodeRequest)(nil), // 8: headscale.v1.GetNodeRequest
(*SetTagsRequest)(nil), // 9: headscale.v1.SetTagsRequest
(*RegisterNodeRequest)(nil), // 10: headscale.v1.RegisterNodeRequest
(*DeleteNodeRequest)(nil), // 11: headscale.v1.DeleteNodeRequest
(*ExpireNodeRequest)(nil), // 12: headscale.v1.ExpireNodeRequest
(*RenameNodeRequest)(nil), // 13: headscale.v1.RenameNodeRequest
(*ListNodesRequest)(nil), // 14: headscale.v1.ListNodesRequest
(*MoveNodeRequest)(nil), // 15: headscale.v1.MoveNodeRequest
(*BackfillNodeIPsRequest)(nil), // 16: headscale.v1.BackfillNodeIPsRequest
(*GetRoutesRequest)(nil), // 17: headscale.v1.GetRoutesRequest
(*EnableRouteRequest)(nil), // 18: headscale.v1.EnableRouteRequest
(*DisableRouteRequest)(nil), // 19: headscale.v1.DisableRouteRequest
(*GetNodeRoutesRequest)(nil), // 20: headscale.v1.GetNodeRoutesRequest
(*DeleteRouteRequest)(nil), // 21: headscale.v1.DeleteRouteRequest
(*CreateApiKeyRequest)(nil), // 22: headscale.v1.CreateApiKeyRequest
(*ExpireApiKeyRequest)(nil), // 23: headscale.v1.ExpireApiKeyRequest
(*ListApiKeysRequest)(nil), // 24: headscale.v1.ListApiKeysRequest
(*DeleteApiKeyRequest)(nil), // 25: headscale.v1.DeleteApiKeyRequest
(*GetPolicyRequest)(nil), // 26: headscale.v1.GetPolicyRequest
(*SetPolicyRequest)(nil), // 27: headscale.v1.SetPolicyRequest
(*CreateUserResponse)(nil), // 28: headscale.v1.CreateUserResponse
(*RenameUserResponse)(nil), // 29: headscale.v1.RenameUserResponse
(*DeleteUserResponse)(nil), // 30: headscale.v1.DeleteUserResponse
(*ListUsersResponse)(nil), // 31: headscale.v1.ListUsersResponse
(*CreatePreAuthKeyResponse)(nil), // 32: headscale.v1.CreatePreAuthKeyResponse
(*ExpirePreAuthKeyResponse)(nil), // 33: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysResponse)(nil), // 34: headscale.v1.ListPreAuthKeysResponse
(*DebugCreateNodeResponse)(nil), // 35: headscale.v1.DebugCreateNodeResponse
(*GetNodeResponse)(nil), // 36: headscale.v1.GetNodeResponse
(*SetTagsResponse)(nil), // 37: headscale.v1.SetTagsResponse
(*RegisterNodeResponse)(nil), // 38: headscale.v1.RegisterNodeResponse
(*DeleteNodeResponse)(nil), // 39: headscale.v1.DeleteNodeResponse
(*ExpireNodeResponse)(nil), // 40: headscale.v1.ExpireNodeResponse
(*RenameNodeResponse)(nil), // 41: headscale.v1.RenameNodeResponse
(*ListNodesResponse)(nil), // 42: headscale.v1.ListNodesResponse
(*MoveNodeResponse)(nil), // 43: headscale.v1.MoveNodeResponse
(*BackfillNodeIPsResponse)(nil), // 44: headscale.v1.BackfillNodeIPsResponse
(*GetRoutesResponse)(nil), // 45: headscale.v1.GetRoutesResponse
(*EnableRouteResponse)(nil), // 46: headscale.v1.EnableRouteResponse
(*DisableRouteResponse)(nil), // 47: headscale.v1.DisableRouteResponse
(*GetNodeRoutesResponse)(nil), // 48: headscale.v1.GetNodeRoutesResponse
(*DeleteRouteResponse)(nil), // 49: headscale.v1.DeleteRouteResponse
(*CreateApiKeyResponse)(nil), // 50: headscale.v1.CreateApiKeyResponse
(*ExpireApiKeyResponse)(nil), // 51: headscale.v1.ExpireApiKeyResponse
(*ListApiKeysResponse)(nil), // 52: headscale.v1.ListApiKeysResponse
(*DeleteApiKeyResponse)(nil), // 53: headscale.v1.DeleteApiKeyResponse
(*GetPolicyResponse)(nil), // 54: headscale.v1.GetPolicyResponse
(*SetPolicyResponse)(nil), // 55: headscale.v1.SetPolicyResponse
}
var file_headscale_v1_headscale_proto_depIdxs = []int32{
0, // 0: headscale.v1.HeadscaleService.CreateUser:input_type -> headscale.v1.CreateUserRequest
@@ -119,46 +319,54 @@ var file_headscale_v1_headscale_proto_depIdxs = []int32{
7, // 7: headscale.v1.HeadscaleService.DebugCreateNode:input_type -> headscale.v1.DebugCreateNodeRequest
8, // 8: headscale.v1.HeadscaleService.GetNode:input_type -> headscale.v1.GetNodeRequest
9, // 9: headscale.v1.HeadscaleService.SetTags:input_type -> headscale.v1.SetTagsRequest
10, // 10: headscale.v1.HeadscaleService.SetApprovedRoutes:input_type -> headscale.v1.SetApprovedRoutesRequest
11, // 11: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
12, // 12: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
13, // 13: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
14, // 14: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
15, // 15: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
16, // 16: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
17, // 17: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
18, // 18: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
19, // 19: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
20, // 20: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
21, // 21: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
22, // 22: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
23, // 23: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
24, // 24: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
25, // 25: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
26, // 26: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
27, // 27: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
28, // 28: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
29, // 29: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
30, // 30: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
31, // 31: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
32, // 32: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
33, // 33: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
34, // 34: headscale.v1.HeadscaleService.SetApprovedRoutes:output_type -> headscale.v1.SetApprovedRoutesResponse
35, // 35: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
36, // 36: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
37, // 37: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
38, // 38: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
39, // 39: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
40, // 40: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
41, // 41: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
42, // 42: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
43, // 43: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
44, // 44: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
45, // 45: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
46, // 46: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
47, // 47: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
24, // [24:48] is the sub-list for method output_type
0, // [0:24] is the sub-list for method input_type
10, // 10: headscale.v1.HeadscaleService.RegisterNode:input_type -> headscale.v1.RegisterNodeRequest
11, // 11: headscale.v1.HeadscaleService.DeleteNode:input_type -> headscale.v1.DeleteNodeRequest
12, // 12: headscale.v1.HeadscaleService.ExpireNode:input_type -> headscale.v1.ExpireNodeRequest
13, // 13: headscale.v1.HeadscaleService.RenameNode:input_type -> headscale.v1.RenameNodeRequest
14, // 14: headscale.v1.HeadscaleService.ListNodes:input_type -> headscale.v1.ListNodesRequest
15, // 15: headscale.v1.HeadscaleService.MoveNode:input_type -> headscale.v1.MoveNodeRequest
16, // 16: headscale.v1.HeadscaleService.BackfillNodeIPs:input_type -> headscale.v1.BackfillNodeIPsRequest
17, // 17: headscale.v1.HeadscaleService.GetRoutes:input_type -> headscale.v1.GetRoutesRequest
18, // 18: headscale.v1.HeadscaleService.EnableRoute:input_type -> headscale.v1.EnableRouteRequest
19, // 19: headscale.v1.HeadscaleService.DisableRoute:input_type -> headscale.v1.DisableRouteRequest
20, // 20: headscale.v1.HeadscaleService.GetNodeRoutes:input_type -> headscale.v1.GetNodeRoutesRequest
21, // 21: headscale.v1.HeadscaleService.DeleteRoute:input_type -> headscale.v1.DeleteRouteRequest
22, // 22: headscale.v1.HeadscaleService.CreateApiKey:input_type -> headscale.v1.CreateApiKeyRequest
23, // 23: headscale.v1.HeadscaleService.ExpireApiKey:input_type -> headscale.v1.ExpireApiKeyRequest
24, // 24: headscale.v1.HeadscaleService.ListApiKeys:input_type -> headscale.v1.ListApiKeysRequest
25, // 25: headscale.v1.HeadscaleService.DeleteApiKey:input_type -> headscale.v1.DeleteApiKeyRequest
26, // 26: headscale.v1.HeadscaleService.GetPolicy:input_type -> headscale.v1.GetPolicyRequest
27, // 27: headscale.v1.HeadscaleService.SetPolicy:input_type -> headscale.v1.SetPolicyRequest
28, // 28: headscale.v1.HeadscaleService.CreateUser:output_type -> headscale.v1.CreateUserResponse
29, // 29: headscale.v1.HeadscaleService.RenameUser:output_type -> headscale.v1.RenameUserResponse
30, // 30: headscale.v1.HeadscaleService.DeleteUser:output_type -> headscale.v1.DeleteUserResponse
31, // 31: headscale.v1.HeadscaleService.ListUsers:output_type -> headscale.v1.ListUsersResponse
32, // 32: headscale.v1.HeadscaleService.CreatePreAuthKey:output_type -> headscale.v1.CreatePreAuthKeyResponse
33, // 33: headscale.v1.HeadscaleService.ExpirePreAuthKey:output_type -> headscale.v1.ExpirePreAuthKeyResponse
34, // 34: headscale.v1.HeadscaleService.ListPreAuthKeys:output_type -> headscale.v1.ListPreAuthKeysResponse
35, // 35: headscale.v1.HeadscaleService.DebugCreateNode:output_type -> headscale.v1.DebugCreateNodeResponse
36, // 36: headscale.v1.HeadscaleService.GetNode:output_type -> headscale.v1.GetNodeResponse
37, // 37: headscale.v1.HeadscaleService.SetTags:output_type -> headscale.v1.SetTagsResponse
38, // 38: headscale.v1.HeadscaleService.RegisterNode:output_type -> headscale.v1.RegisterNodeResponse
39, // 39: headscale.v1.HeadscaleService.DeleteNode:output_type -> headscale.v1.DeleteNodeResponse
40, // 40: headscale.v1.HeadscaleService.ExpireNode:output_type -> headscale.v1.ExpireNodeResponse
41, // 41: headscale.v1.HeadscaleService.RenameNode:output_type -> headscale.v1.RenameNodeResponse
42, // 42: headscale.v1.HeadscaleService.ListNodes:output_type -> headscale.v1.ListNodesResponse
43, // 43: headscale.v1.HeadscaleService.MoveNode:output_type -> headscale.v1.MoveNodeResponse
44, // 44: headscale.v1.HeadscaleService.BackfillNodeIPs:output_type -> headscale.v1.BackfillNodeIPsResponse
45, // 45: headscale.v1.HeadscaleService.GetRoutes:output_type -> headscale.v1.GetRoutesResponse
46, // 46: headscale.v1.HeadscaleService.EnableRoute:output_type -> headscale.v1.EnableRouteResponse
47, // 47: headscale.v1.HeadscaleService.DisableRoute:output_type -> headscale.v1.DisableRouteResponse
48, // 48: headscale.v1.HeadscaleService.GetNodeRoutes:output_type -> headscale.v1.GetNodeRoutesResponse
49, // 49: headscale.v1.HeadscaleService.DeleteRoute:output_type -> headscale.v1.DeleteRouteResponse
50, // 50: headscale.v1.HeadscaleService.CreateApiKey:output_type -> headscale.v1.CreateApiKeyResponse
51, // 51: headscale.v1.HeadscaleService.ExpireApiKey:output_type -> headscale.v1.ExpireApiKeyResponse
52, // 52: headscale.v1.HeadscaleService.ListApiKeys:output_type -> headscale.v1.ListApiKeysResponse
53, // 53: headscale.v1.HeadscaleService.DeleteApiKey:output_type -> headscale.v1.DeleteApiKeyResponse
54, // 54: headscale.v1.HeadscaleService.GetPolicy:output_type -> headscale.v1.GetPolicyResponse
55, // 55: headscale.v1.HeadscaleService.SetPolicy:output_type -> headscale.v1.SetPolicyResponse
28, // [28:56] is the sub-list for method output_type
0, // [0:28] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
@@ -172,13 +380,14 @@ func file_headscale_v1_headscale_proto_init() {
file_headscale_v1_user_proto_init()
file_headscale_v1_preauthkey_proto_init()
file_headscale_v1_node_proto_init()
file_headscale_v1_routes_proto_init()
file_headscale_v1_apikey_proto_init()
file_headscale_v1_policy_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_headscale_proto_rawDesc), len(file_headscale_v1_headscale_proto_rawDesc)),
RawDescriptor: file_headscale_v1_headscale_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
@@ -188,6 +397,7 @@ func file_headscale_v1_headscale_proto_init() {
DependencyIndexes: file_headscale_v1_headscale_proto_depIdxs,
}.Build()
File_headscale_v1_headscale_proto = out.File
file_headscale_v1_headscale_proto_rawDesc = nil
file_headscale_v1_headscale_proto_goTypes = nil
file_headscale_v1_headscale_proto_depIdxs = nil
}

View File

@@ -361,48 +361,6 @@ func local_request_HeadscaleService_SetTags_0(ctx context.Context, marshaler run
return msg, metadata, err
}
func request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetApprovedRoutesRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.SetApprovedRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_SetApprovedRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq SetApprovedRoutesRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.SetApprovedRoutes(ctx, &protoReq)
return msg, metadata, err
}
var filter_HeadscaleService_RegisterNode_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_HeadscaleService_RegisterNode_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
@@ -665,6 +623,168 @@ func local_request_HeadscaleService_BackfillNodeIPs_0(ctx context.Context, marsh
return msg, metadata, err
}
func request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq GetRoutesRequest
metadata runtime.ServerMetadata
)
msg, err := client.GetRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_GetRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq GetRoutesRequest
metadata runtime.ServerMetadata
)
msg, err := server.GetRoutes(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_EnableRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq EnableRouteRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["route_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
}
protoReq.RouteId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
}
msg, err := client.EnableRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_EnableRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq EnableRouteRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["route_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
}
protoReq.RouteId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
}
msg, err := server.EnableRoute(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_DisableRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DisableRouteRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["route_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
}
protoReq.RouteId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
}
msg, err := client.DisableRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_DisableRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DisableRouteRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["route_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
}
protoReq.RouteId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
}
msg, err := server.DisableRoute(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_GetNodeRoutes_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq GetNodeRoutesRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := client.GetNodeRoutes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_GetNodeRoutes_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq GetNodeRoutesRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
msg, err := server.GetNodeRoutes(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_DeleteRoute_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteRouteRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["route_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
}
protoReq.RouteId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
}
msg, err := client.DeleteRoute(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HeadscaleService_DeleteRoute_0(ctx context.Context, marshaler runtime.Marshaler, server HeadscaleServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteRouteRequest
metadata runtime.ServerMetadata
err error
)
val, ok := pathParams["route_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "route_id")
}
protoReq.RouteId, err = runtime.Uint64(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "route_id", err)
}
msg, err := server.DeleteRoute(ctx, &protoReq)
return msg, metadata, err
}
func request_HeadscaleService_CreateApiKey_0(ctx context.Context, marshaler runtime.Marshaler, client HeadscaleServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CreateApiKeyRequest
@@ -1015,26 +1135,6 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetApprovedRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/approve_routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -1175,6 +1275,106 @@ func RegisterHeadscaleServiceHandlerServer(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetRoutes", runtime.WithHTTPPathPattern("/api/v1/routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_GetRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_GetRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_EnableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/EnableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/enable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_EnableRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_EnableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_DisableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DisableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/disable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_DisableRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_DisableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetNodeRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetNodeRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_GetNodeRoutes_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_GetNodeRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeleteRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HeadscaleService_DeleteRoute_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_DeleteRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -1505,23 +1705,6 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_SetTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_SetApprovedRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/SetApprovedRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/approve_routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_SetApprovedRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_SetApprovedRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_RegisterNode_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -1641,6 +1824,91 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
}
forward_HeadscaleService_BackfillNodeIPs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetRoutes", runtime.WithHTTPPathPattern("/api/v1/routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_GetRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_GetRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_EnableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/EnableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/enable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_EnableRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_EnableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_DisableRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DisableRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}/disable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_DisableRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_DisableRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_HeadscaleService_GetNodeRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/GetNodeRoutes", runtime.WithHTTPPathPattern("/api/v1/node/{node_id}/routes"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_GetNodeRoutes_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_GetNodeRoutes_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodDelete, pattern_HeadscaleService_DeleteRoute_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/headscale.v1.HeadscaleService/DeleteRoute", runtime.WithHTTPPathPattern("/api/v1/routes/{route_id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HeadscaleService_DeleteRoute_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HeadscaleService_DeleteRoute_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodPost, pattern_HeadscaleService_CreateApiKey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -1747,55 +2015,63 @@ func RegisterHeadscaleServiceHandlerClient(ctx context.Context, mux *runtime.Ser
}
var (
pattern_HeadscaleService_CreateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
pattern_HeadscaleService_RenameUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "user", "old_id", "rename", "new_name"}, ""))
pattern_HeadscaleService_DeleteUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "user", "id"}, ""))
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
pattern_HeadscaleService_SetTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "tags"}, ""))
pattern_HeadscaleService_SetApprovedRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "approve_routes"}, ""))
pattern_HeadscaleService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "register"}, ""))
pattern_HeadscaleService_DeleteNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_CreateUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
pattern_HeadscaleService_RenameUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "user", "old_id", "rename", "new_name"}, ""))
pattern_HeadscaleService_DeleteUser_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "user", "id"}, ""))
pattern_HeadscaleService_ListUsers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "user"}, ""))
pattern_HeadscaleService_CreatePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_ExpirePreAuthKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "preauthkey", "expire"}, ""))
pattern_HeadscaleService_ListPreAuthKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "preauthkey"}, ""))
pattern_HeadscaleService_DebugCreateNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "debug", "node"}, ""))
pattern_HeadscaleService_GetNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
pattern_HeadscaleService_SetTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "tags"}, ""))
pattern_HeadscaleService_RegisterNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "register"}, ""))
pattern_HeadscaleService_DeleteNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "node", "node_id"}, ""))
pattern_HeadscaleService_ExpireNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "expire"}, ""))
pattern_HeadscaleService_RenameNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"api", "v1", "node", "node_id", "rename", "new_name"}, ""))
pattern_HeadscaleService_ListNodes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "node"}, ""))
pattern_HeadscaleService_MoveNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "user"}, ""))
pattern_HeadscaleService_BackfillNodeIPs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "node", "backfillips"}, ""))
pattern_HeadscaleService_GetRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "routes"}, ""))
pattern_HeadscaleService_EnableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "enable"}, ""))
pattern_HeadscaleService_DisableRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "routes", "route_id", "disable"}, ""))
pattern_HeadscaleService_GetNodeRoutes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"api", "v1", "node", "node_id", "routes"}, ""))
pattern_HeadscaleService_DeleteRoute_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "routes", "route_id"}, ""))
pattern_HeadscaleService_CreateApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
pattern_HeadscaleService_ExpireApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"api", "v1", "apikey", "expire"}, ""))
pattern_HeadscaleService_ListApiKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "apikey"}, ""))
pattern_HeadscaleService_DeleteApiKey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"api", "v1", "apikey", "prefix"}, ""))
pattern_HeadscaleService_GetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
pattern_HeadscaleService_SetPolicy_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "policy"}, ""))
)
var (
forward_HeadscaleService_CreateUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetTags_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetApprovedRoutes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RegisterNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteUser_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListUsers_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreatePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpirePreAuthKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListPreAuthKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DebugCreateNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetTags_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RegisterNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_RenameNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListNodes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_MoveNode_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_BackfillNodeIPs_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetRoutes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_EnableRoute_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DisableRoute_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetNodeRoutes_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteRoute_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_CreateApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ExpireApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_ListApiKeys_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_DeleteApiKey_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_GetPolicy_0 = runtime.ForwardResponseMessage
forward_HeadscaleService_SetPolicy_0 = runtime.ForwardResponseMessage
)
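
Below is a minimal, illustrative sketch (not part of the generated diff above) of how the 0.25.x route RPCs wired up by these patterns and handlers might be driven through the generated Go client. The import path, the listen address, the insecure connection and the GetRoutes/GetId accessors on the response types are assumptions for illustration only; a real deployment would also need TLS and API-key authentication.

package main

import (
	"context"
	"log"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed generated-package path
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed local gRPC address; credentials are omitted for brevity.
	conn, err := grpc.Dial("127.0.0.1:50443", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)
	ctx := context.Background()

	// GET /api/v1/routes through the gateway corresponds to this RPC.
	routes, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
	if err != nil {
		log.Fatalf("GetRoutes: %v", err)
	}

	// POST /api/v1/routes/{route_id}/enable corresponds to EnableRoute;
	// route IDs are uint64, as the path-parameter parsing above shows.
	for _, r := range routes.GetRoutes() {
		if _, err := client.EnableRoute(ctx, &v1.EnableRouteRequest{RouteId: r.GetId()}); err != nil {
			log.Printf("EnableRoute %d: %v", r.GetId(), err)
		}
	}
}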

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc-gen-go-grpc v1.3.0
// - protoc (unknown)
// source: headscale/v1/headscale.proto
@@ -15,34 +15,38 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const (
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
HeadscaleService_SetApprovedRoutes_FullMethodName = "/headscale.v1.HeadscaleService/SetApprovedRoutes"
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
HeadscaleService_CreateUser_FullMethodName = "/headscale.v1.HeadscaleService/CreateUser"
HeadscaleService_RenameUser_FullMethodName = "/headscale.v1.HeadscaleService/RenameUser"
HeadscaleService_DeleteUser_FullMethodName = "/headscale.v1.HeadscaleService/DeleteUser"
HeadscaleService_ListUsers_FullMethodName = "/headscale.v1.HeadscaleService/ListUsers"
HeadscaleService_CreatePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/CreatePreAuthKey"
HeadscaleService_ExpirePreAuthKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpirePreAuthKey"
HeadscaleService_ListPreAuthKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListPreAuthKeys"
HeadscaleService_DebugCreateNode_FullMethodName = "/headscale.v1.HeadscaleService/DebugCreateNode"
HeadscaleService_GetNode_FullMethodName = "/headscale.v1.HeadscaleService/GetNode"
HeadscaleService_SetTags_FullMethodName = "/headscale.v1.HeadscaleService/SetTags"
HeadscaleService_RegisterNode_FullMethodName = "/headscale.v1.HeadscaleService/RegisterNode"
HeadscaleService_DeleteNode_FullMethodName = "/headscale.v1.HeadscaleService/DeleteNode"
HeadscaleService_ExpireNode_FullMethodName = "/headscale.v1.HeadscaleService/ExpireNode"
HeadscaleService_RenameNode_FullMethodName = "/headscale.v1.HeadscaleService/RenameNode"
HeadscaleService_ListNodes_FullMethodName = "/headscale.v1.HeadscaleService/ListNodes"
HeadscaleService_MoveNode_FullMethodName = "/headscale.v1.HeadscaleService/MoveNode"
HeadscaleService_BackfillNodeIPs_FullMethodName = "/headscale.v1.HeadscaleService/BackfillNodeIPs"
HeadscaleService_GetRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetRoutes"
HeadscaleService_EnableRoute_FullMethodName = "/headscale.v1.HeadscaleService/EnableRoute"
HeadscaleService_DisableRoute_FullMethodName = "/headscale.v1.HeadscaleService/DisableRoute"
HeadscaleService_GetNodeRoutes_FullMethodName = "/headscale.v1.HeadscaleService/GetNodeRoutes"
HeadscaleService_DeleteRoute_FullMethodName = "/headscale.v1.HeadscaleService/DeleteRoute"
HeadscaleService_CreateApiKey_FullMethodName = "/headscale.v1.HeadscaleService/CreateApiKey"
HeadscaleService_ExpireApiKey_FullMethodName = "/headscale.v1.HeadscaleService/ExpireApiKey"
HeadscaleService_ListApiKeys_FullMethodName = "/headscale.v1.HeadscaleService/ListApiKeys"
HeadscaleService_DeleteApiKey_FullMethodName = "/headscale.v1.HeadscaleService/DeleteApiKey"
HeadscaleService_GetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/GetPolicy"
HeadscaleService_SetPolicy_FullMethodName = "/headscale.v1.HeadscaleService/SetPolicy"
)
// HeadscaleServiceClient is the client API for HeadscaleService service.
@@ -62,7 +66,6 @@ type HeadscaleServiceClient interface {
DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error)
GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error)
SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error)
SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error)
RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error)
DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error)
ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error)
@@ -70,6 +73,12 @@ type HeadscaleServiceClient interface {
ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error)
MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error)
BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error)
// --- Route start ---
GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error)
EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error)
DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error)
GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error)
DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error)
// --- ApiKeys start ---
CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error)
ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error)
@@ -89,9 +98,8 @@ func NewHeadscaleServiceClient(cc grpc.ClientConnInterface) HeadscaleServiceClie
}
func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserRequest, opts ...grpc.CallOption) (*CreateUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_CreateUser_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -99,9 +107,8 @@ func (c *headscaleServiceClient) CreateUser(ctx context.Context, in *CreateUserR
}
func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserRequest, opts ...grpc.CallOption) (*RenameUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RenameUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_RenameUser_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -109,9 +116,8 @@ func (c *headscaleServiceClient) RenameUser(ctx context.Context, in *RenameUserR
}
func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserRequest, opts ...grpc.CallOption) (*DeleteUserResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteUserResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteUser_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -119,9 +125,8 @@ func (c *headscaleServiceClient) DeleteUser(ctx context.Context, in *DeleteUserR
}
func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersRequest, opts ...grpc.CallOption) (*ListUsersResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListUsersResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ListUsers_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -129,9 +134,8 @@ func (c *headscaleServiceClient) ListUsers(ctx context.Context, in *ListUsersReq
}
func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *CreatePreAuthKeyRequest, opts ...grpc.CallOption) (*CreatePreAuthKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreatePreAuthKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_CreatePreAuthKey_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -139,9 +143,8 @@ func (c *headscaleServiceClient) CreatePreAuthKey(ctx context.Context, in *Creat
}
func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *ExpirePreAuthKeyRequest, opts ...grpc.CallOption) (*ExpirePreAuthKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExpirePreAuthKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ExpirePreAuthKey_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -149,9 +152,8 @@ func (c *headscaleServiceClient) ExpirePreAuthKey(ctx context.Context, in *Expir
}
func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPreAuthKeysRequest, opts ...grpc.CallOption) (*ListPreAuthKeysResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListPreAuthKeysResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ListPreAuthKeys_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -159,9 +161,8 @@ func (c *headscaleServiceClient) ListPreAuthKeys(ctx context.Context, in *ListPr
}
func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugCreateNodeRequest, opts ...grpc.CallOption) (*DebugCreateNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DebugCreateNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_DebugCreateNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -169,9 +170,8 @@ func (c *headscaleServiceClient) DebugCreateNode(ctx context.Context, in *DebugC
}
func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_GetNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -179,19 +179,8 @@ func (c *headscaleServiceClient) GetNode(ctx context.Context, in *GetNodeRequest
}
func (c *headscaleServiceClient) SetTags(ctx context.Context, in *SetTagsRequest, opts ...grpc.CallOption) (*SetTagsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SetTagsResponse)
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetApprovedRoutesRequest, opts ...grpc.CallOption) (*SetApprovedRoutesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SetApprovedRoutesResponse)
err := c.cc.Invoke(ctx, HeadscaleService_SetApprovedRoutes_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_SetTags_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -199,9 +188,8 @@ func (c *headscaleServiceClient) SetApprovedRoutes(ctx context.Context, in *SetA
}
func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterNodeRequest, opts ...grpc.CallOption) (*RegisterNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RegisterNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_RegisterNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -209,9 +197,8 @@ func (c *headscaleServiceClient) RegisterNode(ctx context.Context, in *RegisterN
}
func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeRequest, opts ...grpc.CallOption) (*DeleteNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -219,9 +206,8 @@ func (c *headscaleServiceClient) DeleteNode(ctx context.Context, in *DeleteNodeR
}
func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeRequest, opts ...grpc.CallOption) (*ExpireNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExpireNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ExpireNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -229,9 +215,8 @@ func (c *headscaleServiceClient) ExpireNode(ctx context.Context, in *ExpireNodeR
}
func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeRequest, opts ...grpc.CallOption) (*RenameNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RenameNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_RenameNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -239,9 +224,8 @@ func (c *headscaleServiceClient) RenameNode(ctx context.Context, in *RenameNodeR
}
func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListNodesResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ListNodes_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -249,9 +233,8 @@ func (c *headscaleServiceClient) ListNodes(ctx context.Context, in *ListNodesReq
}
func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeRequest, opts ...grpc.CallOption) (*MoveNodeResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(MoveNodeResponse)
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_MoveNode_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -259,9 +242,53 @@ func (c *headscaleServiceClient) MoveNode(ctx context.Context, in *MoveNodeReque
}
func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *BackfillNodeIPsRequest, opts ...grpc.CallOption) (*BackfillNodeIPsResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(BackfillNodeIPsResponse)
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_BackfillNodeIPs_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) GetRoutes(ctx context.Context, in *GetRoutesRequest, opts ...grpc.CallOption) (*GetRoutesResponse, error) {
out := new(GetRoutesResponse)
err := c.cc.Invoke(ctx, HeadscaleService_GetRoutes_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) EnableRoute(ctx context.Context, in *EnableRouteRequest, opts ...grpc.CallOption) (*EnableRouteResponse, error) {
out := new(EnableRouteResponse)
err := c.cc.Invoke(ctx, HeadscaleService_EnableRoute_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DisableRoute(ctx context.Context, in *DisableRouteRequest, opts ...grpc.CallOption) (*DisableRouteResponse, error) {
out := new(DisableRouteResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DisableRoute_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) GetNodeRoutes(ctx context.Context, in *GetNodeRoutesRequest, opts ...grpc.CallOption) (*GetNodeRoutesResponse, error) {
out := new(GetNodeRoutesResponse)
err := c.cc.Invoke(ctx, HeadscaleService_GetNodeRoutes_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *headscaleServiceClient) DeleteRoute(ctx context.Context, in *DeleteRouteRequest, opts ...grpc.CallOption) (*DeleteRouteResponse, error) {
out := new(DeleteRouteResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteRoute_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -269,9 +296,8 @@ func (c *headscaleServiceClient) BackfillNodeIPs(ctx context.Context, in *Backfi
}
func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApiKeyRequest, opts ...grpc.CallOption) (*CreateApiKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateApiKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_CreateApiKey_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -279,9 +305,8 @@ func (c *headscaleServiceClient) CreateApiKey(ctx context.Context, in *CreateApi
}
func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApiKeyRequest, opts ...grpc.CallOption) (*ExpireApiKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ExpireApiKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ExpireApiKey_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -289,9 +314,8 @@ func (c *headscaleServiceClient) ExpireApiKey(ctx context.Context, in *ExpireApi
}
func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKeysRequest, opts ...grpc.CallOption) (*ListApiKeysResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListApiKeysResponse)
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_ListApiKeys_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -299,9 +323,8 @@ func (c *headscaleServiceClient) ListApiKeys(ctx context.Context, in *ListApiKey
}
func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApiKeyRequest, opts ...grpc.CallOption) (*DeleteApiKeyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteApiKeyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_DeleteApiKey_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -309,9 +332,8 @@ func (c *headscaleServiceClient) DeleteApiKey(ctx context.Context, in *DeleteApi
}
func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyRequest, opts ...grpc.CallOption) (*GetPolicyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetPolicyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_GetPolicy_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -319,9 +341,8 @@ func (c *headscaleServiceClient) GetPolicy(ctx context.Context, in *GetPolicyReq
}
func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyRequest, opts ...grpc.CallOption) (*SetPolicyResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SetPolicyResponse)
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, cOpts...)
err := c.cc.Invoke(ctx, HeadscaleService_SetPolicy_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
@@ -330,7 +351,7 @@ func (c *headscaleServiceClient) SetPolicy(ctx context.Context, in *SetPolicyReq
// HeadscaleServiceServer is the server API for HeadscaleService service.
// All implementations must embed UnimplementedHeadscaleServiceServer
// for forward compatibility.
// for forward compatibility
type HeadscaleServiceServer interface {
// --- User start ---
CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error)
@@ -345,7 +366,6 @@ type HeadscaleServiceServer interface {
DebugCreateNode(context.Context, *DebugCreateNodeRequest) (*DebugCreateNodeResponse, error)
GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error)
SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error)
SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error)
RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error)
DeleteNode(context.Context, *DeleteNodeRequest) (*DeleteNodeResponse, error)
ExpireNode(context.Context, *ExpireNodeRequest) (*ExpireNodeResponse, error)
@@ -353,6 +373,12 @@ type HeadscaleServiceServer interface {
ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error)
MoveNode(context.Context, *MoveNodeRequest) (*MoveNodeResponse, error)
BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error)
// --- Route start ---
GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error)
EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error)
DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error)
GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error)
DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error)
// --- ApiKeys start ---
CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error)
ExpireApiKey(context.Context, *ExpireApiKeyRequest) (*ExpireApiKeyResponse, error)
@@ -364,12 +390,9 @@ type HeadscaleServiceServer interface {
mustEmbedUnimplementedHeadscaleServiceServer()
}
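For orientation, a minimal sketch (not part of this diff, written as if it lived in the same package as the generated code) of how an implementation satisfies this interface: it embeds UnimplementedHeadscaleServiceServer by value, which supplies mustEmbedUnimplementedHeadscaleServiceServer and a codes.Unimplemented default for every RPC, and then overrides only the methods it actually serves.

// exampleServer is a hypothetical implementation used only for illustration.
type exampleServer struct {
	// Embedding by value keeps the implementation forward compatible: RPCs
	// added to the service later fall back to the Unimplemented defaults.
	UnimplementedHeadscaleServiceServer
}

// Override just the RPCs this server actually implements.
func (s *exampleServer) ListApiKeys(ctx context.Context, req *ListApiKeysRequest) (*ListApiKeysResponse, error) {
	return &ListApiKeysResponse{}, nil
}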
// UnimplementedHeadscaleServiceServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedHeadscaleServiceServer struct{}
// UnimplementedHeadscaleServiceServer must be embedded to have forward compatible implementations.
type UnimplementedHeadscaleServiceServer struct {
}
func (UnimplementedHeadscaleServiceServer) CreateUser(context.Context, *CreateUserRequest) (*CreateUserResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateUser not implemented")
@@ -401,9 +424,6 @@ func (UnimplementedHeadscaleServiceServer) GetNode(context.Context, *GetNodeRequ
func (UnimplementedHeadscaleServiceServer) SetTags(context.Context, *SetTagsRequest) (*SetTagsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetTags not implemented")
}
func (UnimplementedHeadscaleServiceServer) SetApprovedRoutes(context.Context, *SetApprovedRoutesRequest) (*SetApprovedRoutesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SetApprovedRoutes not implemented")
}
func (UnimplementedHeadscaleServiceServer) RegisterNode(context.Context, *RegisterNodeRequest) (*RegisterNodeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RegisterNode not implemented")
}
@@ -425,6 +445,21 @@ func (UnimplementedHeadscaleServiceServer) MoveNode(context.Context, *MoveNodeRe
func (UnimplementedHeadscaleServiceServer) BackfillNodeIPs(context.Context, *BackfillNodeIPsRequest) (*BackfillNodeIPsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method BackfillNodeIPs not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetRoutes(context.Context, *GetRoutesRequest) (*GetRoutesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRoutes not implemented")
}
func (UnimplementedHeadscaleServiceServer) EnableRoute(context.Context, *EnableRouteRequest) (*EnableRouteResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method EnableRoute not implemented")
}
func (UnimplementedHeadscaleServiceServer) DisableRoute(context.Context, *DisableRouteRequest) (*DisableRouteResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DisableRoute not implemented")
}
func (UnimplementedHeadscaleServiceServer) GetNodeRoutes(context.Context, *GetNodeRoutesRequest) (*GetNodeRoutesResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetNodeRoutes not implemented")
}
func (UnimplementedHeadscaleServiceServer) DeleteRoute(context.Context, *DeleteRouteRequest) (*DeleteRouteResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteRoute not implemented")
}
func (UnimplementedHeadscaleServiceServer) CreateApiKey(context.Context, *CreateApiKeyRequest) (*CreateApiKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateApiKey not implemented")
}
@@ -444,7 +479,6 @@ func (UnimplementedHeadscaleServiceServer) SetPolicy(context.Context, *SetPolicy
return nil, status.Errorf(codes.Unimplemented, "method SetPolicy not implemented")
}
func (UnimplementedHeadscaleServiceServer) mustEmbedUnimplementedHeadscaleServiceServer() {}
func (UnimplementedHeadscaleServiceServer) testEmbeddedByValue() {}
// UnsafeHeadscaleServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HeadscaleServiceServer will
@@ -454,13 +488,6 @@ type UnsafeHeadscaleServiceServer interface {
}
func RegisterHeadscaleServiceServer(s grpc.ServiceRegistrar, srv HeadscaleServiceServer) {
// If the following call panics, it indicates UnimplementedHeadscaleServiceServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&HeadscaleService_ServiceDesc, srv)
}
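Wiring the server side up is then a single registration call. A hypothetical sketch, assuming the exampleServer from the sketch above plus the surrounding file's grpc import and "net"; note that one side of this diff additionally verifies at registration time that the Unimplemented struct was embedded by value.

func serveExample() error {
	lis, err := net.Listen("tcp", "127.0.0.1:50443") // address is an assumption
	if err != nil {
		return err
	}
	grpcServer := grpc.NewServer()
	// RegisterHeadscaleServiceServer attaches HeadscaleService_ServiceDesc to
	// the server, exactly as defined above.
	RegisterHeadscaleServiceServer(grpcServer, &exampleServer{})
	return grpcServer.Serve(lis)
}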
@@ -644,24 +671,6 @@ func _HeadscaleService_SetTags_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_SetApprovedRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SetApprovedRoutesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_SetApprovedRoutes_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).SetApprovedRoutes(ctx, req.(*SetApprovedRoutesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_RegisterNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RegisterNodeRequest)
if err := dec(in); err != nil {
@@ -788,6 +797,96 @@ func _HeadscaleService_BackfillNodeIPs_Handler(srv interface{}, ctx context.Cont
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_GetRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRoutesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).GetRoutes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_GetRoutes_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).GetRoutes(ctx, req.(*GetRoutesRequest))
}
return interceptor(ctx, in, info, handler)
}
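Each of these handlers follows the standard protoc-gen-go-grpc pattern: decode the request, then either call the server method directly or route it through the registered unary interceptor along with the method's FullMethod name. A minimal sketch of such an interceptor (hypothetical, assuming the standard google.golang.org/grpc package and the log package):

func loggingInterceptor(
	ctx context.Context,
	req interface{},
	info *grpc.UnaryServerInfo,
	handler grpc.UnaryHandler,
) (interface{}, error) {
	// info.FullMethod is the same string the generated handlers pass in,
	// e.g. HeadscaleService_GetRoutes_FullMethodName.
	log.Printf("grpc call: %s", info.FullMethod)
	return handler(ctx, req)
}

// Installed when the server is constructed, for example:
//   grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))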
func _HeadscaleService_EnableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EnableRouteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).EnableRoute(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_EnableRoute_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).EnableRoute(ctx, req.(*EnableRouteRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_DisableRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DisableRouteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).DisableRoute(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_DisableRoute_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).DisableRoute(ctx, req.(*DisableRouteRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_GetNodeRoutes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetNodeRoutesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_GetNodeRoutes_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).GetNodeRoutes(ctx, req.(*GetNodeRoutesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_DeleteRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteRouteRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: HeadscaleService_DeleteRoute_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HeadscaleServiceServer).DeleteRoute(ctx, req.(*DeleteRouteRequest))
}
return interceptor(ctx, in, info, handler)
}
func _HeadscaleService_CreateApiKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateApiKeyRequest)
if err := dec(in); err != nil {
@@ -943,10 +1042,6 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "SetTags",
Handler: _HeadscaleService_SetTags_Handler,
},
{
MethodName: "SetApprovedRoutes",
Handler: _HeadscaleService_SetApprovedRoutes_Handler,
},
{
MethodName: "RegisterNode",
Handler: _HeadscaleService_RegisterNode_Handler,
@@ -975,6 +1070,26 @@ var HeadscaleService_ServiceDesc = grpc.ServiceDesc{
MethodName: "BackfillNodeIPs",
Handler: _HeadscaleService_BackfillNodeIPs_Handler,
},
{
MethodName: "GetRoutes",
Handler: _HeadscaleService_GetRoutes_Handler,
},
{
MethodName: "EnableRoute",
Handler: _HeadscaleService_EnableRoute_Handler,
},
{
MethodName: "DisableRoute",
Handler: _HeadscaleService_DisableRoute_Handler,
},
{
MethodName: "GetNodeRoutes",
Handler: _HeadscaleService_GetNodeRoutes_Handler,
},
{
MethodName: "DeleteRoute",
Handler: _HeadscaleService_DeleteRoute_Handler,
},
{
MethodName: "CreateApiKey",
Handler: _HeadscaleService_CreateApiKey_Handler,

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/policy.proto
@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -23,10 +22,11 @@ const (
)
type SetPolicyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
}
func (x *SetPolicyRequest) Reset() {
@@ -67,11 +67,12 @@ func (x *SetPolicyRequest) GetPolicy() string {
}
type SetPolicyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
}
func (x *SetPolicyResponse) Reset() {
@@ -119,9 +120,9 @@ func (x *SetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
}
type GetPolicyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *GetPolicyRequest) Reset() {
@@ -155,11 +156,12 @@ func (*GetPolicyRequest) Descriptor() ([]byte, []int) {
}
type GetPolicyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Policy string `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
}
func (x *GetPolicyResponse) Reset() {
@@ -208,29 +210,42 @@ func (x *GetPolicyResponse) GetUpdatedAt() *timestamppb.Timestamp {
var File_headscale_v1_policy_proto protoreflect.FileDescriptor
const file_headscale_v1_policy_proto_rawDesc = "" +
"\n" +
"\x19headscale/v1/policy.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"*\n" +
"\x10SetPolicyRequest\x12\x16\n" +
"\x06policy\x18\x01 \x01(\tR\x06policy\"f\n" +
"\x11SetPolicyResponse\x12\x16\n" +
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
"\n" +
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAt\"\x12\n" +
"\x10GetPolicyRequest\"f\n" +
"\x11GetPolicyResponse\x12\x16\n" +
"\x06policy\x18\x01 \x01(\tR\x06policy\x129\n" +
"\n" +
"updated_at\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tupdatedAtB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var file_headscale_v1_policy_proto_rawDesc = []byte{
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2a, 0x0a, 0x10, 0x53, 0x65,
0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16,
0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x66, 0x0a, 0x11, 0x53, 0x65, 0x74, 0x50, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12,
0x0a, 0x10, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x22, 0x66, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e,
0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f,
0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_headscale_v1_policy_proto_rawDescOnce sync.Once
file_headscale_v1_policy_proto_rawDescData []byte
file_headscale_v1_policy_proto_rawDescData = file_headscale_v1_policy_proto_rawDesc
)
func file_headscale_v1_policy_proto_rawDescGZIP() []byte {
file_headscale_v1_policy_proto_rawDescOnce.Do(func() {
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)))
file_headscale_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_policy_proto_rawDescData)
})
return file_headscale_v1_policy_proto_rawDescData
}
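Both generations of this file lazily gzip the raw descriptor exactly once; they differ only in whether the raw bytes are stored as a string (accessed through unsafe.Slice) or as a []byte. Independent of the protobuf runtime, the underlying pattern is an ordinary sync.Once initialisation, roughly as in this hypothetical sketch:

package descriptorcache

import (
	"bytes"
	"compress/gzip"
	"sync"
)

var (
	compressOnce sync.Once
	compressed   []byte
)

// compressedDescriptor gzips raw on first use and returns the cached result
// on every later call; this mirrors what rawDescGZIP does above.
func compressedDescriptor(raw []byte) []byte {
	compressOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		_, _ = zw.Write(raw)
		_ = zw.Close()
		compressed = buf.Bytes()
	})
	return compressed
}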
@@ -262,7 +277,7 @@ func file_headscale_v1_policy_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_policy_proto_rawDesc), len(file_headscale_v1_policy_proto_rawDesc)),
RawDescriptor: file_headscale_v1_policy_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
@@ -273,6 +288,7 @@ func file_headscale_v1_policy_proto_init() {
MessageInfos: file_headscale_v1_policy_proto_msgTypes,
}.Build()
File_headscale_v1_policy_proto = out.File
file_headscale_v1_policy_proto_rawDesc = nil
file_headscale_v1_policy_proto_goTypes = nil
file_headscale_v1_policy_proto_depIdxs = nil
}

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/preauthkey.proto
@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -23,18 +22,19 @@ const (
)
type PreAuthKey struct {
state protoimpl.MessageState `protogen:"open.v1"`
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
Reusable bool `protobuf:"varint,4,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,5,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Used bool `protobuf:"varint,6,opt,name=used,proto3" json:"used,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=expiration,proto3" json:"expiration,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
AclTags []string `protobuf:"bytes,9,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
}
func (x *PreAuthKey) Reset() {
@@ -67,18 +67,18 @@ func (*PreAuthKey) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{0}
}
func (x *PreAuthKey) GetUser() *User {
func (x *PreAuthKey) GetUser() string {
if x != nil {
return x.User
}
return nil
return ""
}
func (x *PreAuthKey) GetId() uint64 {
func (x *PreAuthKey) GetId() string {
if x != nil {
return x.Id
}
return 0
return ""
}
func (x *PreAuthKey) GetKey() string {
@@ -131,14 +131,15 @@ func (x *PreAuthKey) GetAclTags() []string {
}
type CreatePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Reusable bool `protobuf:"varint,2,opt,name=reusable,proto3" json:"reusable,omitempty"`
Ephemeral bool `protobuf:"varint,3,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
Expiration *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expiration,proto3" json:"expiration,omitempty"`
AclTags []string `protobuf:"bytes,5,rep,name=acl_tags,json=aclTags,proto3" json:"acl_tags,omitempty"`
}
func (x *CreatePreAuthKeyRequest) Reset() {
@@ -171,11 +172,11 @@ func (*CreatePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{1}
}
func (x *CreatePreAuthKeyRequest) GetUser() uint64 {
func (x *CreatePreAuthKeyRequest) GetUser() string {
if x != nil {
return x.User
}
return 0
return ""
}
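The practical effect of this change shows up when constructing requests: on one side of the diff the user is referenced by a numeric ID, on the other by name. A hypothetical helper against the uint64 variant, written as if in the generated package:

func newPreAuthKeyRequest(userID uint64) *CreatePreAuthKeyRequest {
	return &CreatePreAuthKeyRequest{
		User:      userID, // on the string side of this diff the field carries the user's name instead
		Reusable:  true,
		Ephemeral: false,
		AclTags:   []string{"tag:example"}, // hypothetical tag
	}
}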
func (x *CreatePreAuthKeyRequest) GetReusable() bool {
@@ -207,10 +208,11 @@ func (x *CreatePreAuthKeyRequest) GetAclTags() []string {
}
type CreatePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PreAuthKey *PreAuthKey `protobuf:"bytes,1,opt,name=pre_auth_key,json=preAuthKey,proto3" json:"pre_auth_key,omitempty"`
}
func (x *CreatePreAuthKeyResponse) Reset() {
@@ -251,11 +253,12 @@ func (x *CreatePreAuthKeyResponse) GetPreAuthKey() *PreAuthKey {
}
type ExpirePreAuthKeyRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
}
func (x *ExpirePreAuthKeyRequest) Reset() {
@@ -288,11 +291,11 @@ func (*ExpirePreAuthKeyRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{3}
}
func (x *ExpirePreAuthKeyRequest) GetUser() uint64 {
func (x *ExpirePreAuthKeyRequest) GetUser() string {
if x != nil {
return x.User
}
return 0
return ""
}
func (x *ExpirePreAuthKeyRequest) GetKey() string {
@@ -303,9 +306,9 @@ func (x *ExpirePreAuthKeyRequest) GetKey() string {
}
type ExpirePreAuthKeyResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *ExpirePreAuthKeyResponse) Reset() {
@@ -339,10 +342,11 @@ func (*ExpirePreAuthKeyResponse) Descriptor() ([]byte, []int) {
}
type ListPreAuthKeysRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
User uint64 `protobuf:"varint,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
}
func (x *ListPreAuthKeysRequest) Reset() {
@@ -375,18 +379,19 @@ func (*ListPreAuthKeysRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_preauthkey_proto_rawDescGZIP(), []int{5}
}
func (x *ListPreAuthKeysRequest) GetUser() uint64 {
func (x *ListPreAuthKeysRequest) GetUser() string {
if x != nil {
return x.User
}
return 0
return ""
}
type ListPreAuthKeysResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
PreAuthKeys []*PreAuthKey `protobuf:"bytes,1,rep,name=pre_auth_keys,json=preAuthKeys,proto3" json:"pre_auth_keys,omitempty"`
}
func (x *ListPreAuthKeysResponse) Reset() {
@@ -428,51 +433,76 @@ func (x *ListPreAuthKeysResponse) GetPreAuthKeys() []*PreAuthKey {
var File_headscale_v1_preauthkey_proto protoreflect.FileDescriptor
const file_headscale_v1_preauthkey_proto_rawDesc = "" +
"\n" +
"\x1dheadscale/v1/preauthkey.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17headscale/v1/user.proto\"\xb6\x02\n" +
"\n" +
"PreAuthKey\x12&\n" +
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\x12\x0e\n" +
"\x02id\x18\x02 \x01(\x04R\x02id\x12\x10\n" +
"\x03key\x18\x03 \x01(\tR\x03key\x12\x1a\n" +
"\breusable\x18\x04 \x01(\bR\breusable\x12\x1c\n" +
"\tephemeral\x18\x05 \x01(\bR\tephemeral\x12\x12\n" +
"\x04used\x18\x06 \x01(\bR\x04used\x12:\n" +
"\n" +
"expiration\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\x129\n" +
"\n" +
"created_at\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x19\n" +
"\bacl_tags\x18\t \x03(\tR\aaclTags\"\xbe\x01\n" +
"\x17CreatePreAuthKeyRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x1a\n" +
"\breusable\x18\x02 \x01(\bR\breusable\x12\x1c\n" +
"\tephemeral\x18\x03 \x01(\bR\tephemeral\x12:\n" +
"\n" +
"expiration\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\n" +
"expiration\x12\x19\n" +
"\bacl_tags\x18\x05 \x03(\tR\aaclTags\"V\n" +
"\x18CreatePreAuthKeyResponse\x12:\n" +
"\fpre_auth_key\x18\x01 \x01(\v2\x18.headscale.v1.PreAuthKeyR\n" +
"preAuthKey\"?\n" +
"\x17ExpirePreAuthKeyRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\x12\x10\n" +
"\x03key\x18\x02 \x01(\tR\x03key\"\x1a\n" +
"\x18ExpirePreAuthKeyResponse\",\n" +
"\x16ListPreAuthKeysRequest\x12\x12\n" +
"\x04user\x18\x01 \x01(\x04R\x04user\"W\n" +
"\x17ListPreAuthKeysResponse\x12<\n" +
"\rpre_auth_keys\x18\x01 \x03(\v2\x18.headscale.v1.PreAuthKeyR\vpreAuthKeysB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var file_headscale_v1_preauthkey_proto_rawDesc = []byte{
0x0a, 0x1d, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70,
0x72, 0x65, 0x61, 0x75, 0x74, 0x68, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
0x0c, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
0x02, 0x0a, 0x0a, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x12, 0x12, 0x0a,
0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65,
0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
0x6b, 0x65, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01,
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x12, 0x0a,
0x04, 0x75, 0x73, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x75, 0x73, 0x65,
0x64, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a,
0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63,
0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c, 0x5f,
0x74, 0x61, 0x67, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c, 0x54,
0x61, 0x67, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75,
0x73, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x75, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01,
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x12, 0x3a, 0x0a,
0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65,
0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x6c,
0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x6c,
0x54, 0x61, 0x67, 0x73, 0x22, 0x56, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72,
0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
0x52, 0x0a, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x17,
0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x1a, 0x0a,
0x18, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65,
0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x16, 0x4c, 0x69, 0x73,
0x74, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x57, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x50,
0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6b,
0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x68, 0x65, 0x61, 0x64,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68,
0x4b, 0x65, 0x79, 0x52, 0x0b, 0x70, 0x72, 0x65, 0x41, 0x75, 0x74, 0x68, 0x4b, 0x65, 0x79, 0x73,
0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a,
0x75, 0x61, 0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
file_headscale_v1_preauthkey_proto_rawDescOnce sync.Once
file_headscale_v1_preauthkey_proto_rawDescData []byte
file_headscale_v1_preauthkey_proto_rawDescData = file_headscale_v1_preauthkey_proto_rawDesc
)
func file_headscale_v1_preauthkey_proto_rawDescGZIP() []byte {
file_headscale_v1_preauthkey_proto_rawDescOnce.Do(func() {
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)))
file_headscale_v1_preauthkey_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_preauthkey_proto_rawDescData)
})
return file_headscale_v1_preauthkey_proto_rawDescData
}
@@ -486,21 +516,19 @@ var file_headscale_v1_preauthkey_proto_goTypes = []any{
(*ExpirePreAuthKeyResponse)(nil), // 4: headscale.v1.ExpirePreAuthKeyResponse
(*ListPreAuthKeysRequest)(nil), // 5: headscale.v1.ListPreAuthKeysRequest
(*ListPreAuthKeysResponse)(nil), // 6: headscale.v1.ListPreAuthKeysResponse
(*User)(nil), // 7: headscale.v1.User
(*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
}
var file_headscale_v1_preauthkey_proto_depIdxs = []int32{
7, // 0: headscale.v1.PreAuthKey.user:type_name -> headscale.v1.User
8, // 1: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
8, // 2: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
8, // 3: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
0, // 4: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
0, // 5: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
7, // 0: headscale.v1.PreAuthKey.expiration:type_name -> google.protobuf.Timestamp
7, // 1: headscale.v1.PreAuthKey.created_at:type_name -> google.protobuf.Timestamp
7, // 2: headscale.v1.CreatePreAuthKeyRequest.expiration:type_name -> google.protobuf.Timestamp
0, // 3: headscale.v1.CreatePreAuthKeyResponse.pre_auth_key:type_name -> headscale.v1.PreAuthKey
0, // 4: headscale.v1.ListPreAuthKeysResponse.pre_auth_keys:type_name -> headscale.v1.PreAuthKey
5, // [5:5] is the sub-list for method output_type
5, // [5:5] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_headscale_v1_preauthkey_proto_init() }
@@ -508,12 +536,11 @@ func file_headscale_v1_preauthkey_proto_init() {
if File_headscale_v1_preauthkey_proto != nil {
return
}
file_headscale_v1_user_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_preauthkey_proto_rawDesc), len(file_headscale_v1_preauthkey_proto_rawDesc)),
RawDescriptor: file_headscale_v1_preauthkey_proto_rawDesc,
NumEnums: 0,
NumMessages: 7,
NumExtensions: 0,
@@ -524,6 +551,7 @@ func file_headscale_v1_preauthkey_proto_init() {
MessageInfos: file_headscale_v1_preauthkey_proto_msgTypes,
}.Build()
File_headscale_v1_preauthkey_proto = out.File
file_headscale_v1_preauthkey_proto_rawDesc = nil
file_headscale_v1_preauthkey_proto_goTypes = nil
file_headscale_v1_preauthkey_proto_depIdxs = nil
}

View File

@@ -0,0 +1,677 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/routes.proto
package v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Route struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Node *Node `protobuf:"bytes,2,opt,name=node,proto3" json:"node,omitempty"`
Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"`
Advertised bool `protobuf:"varint,4,opt,name=advertised,proto3" json:"advertised,omitempty"`
Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty"`
IsPrimary bool `protobuf:"varint,6,opt,name=is_primary,json=isPrimary,proto3" json:"is_primary,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"`
DeletedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=deleted_at,json=deletedAt,proto3" json:"deleted_at,omitempty"`
}
func (x *Route) Reset() {
*x = Route{}
mi := &file_headscale_v1_routes_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *Route) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Route) ProtoMessage() {}
func (x *Route) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Route.ProtoReflect.Descriptor instead.
func (*Route) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{0}
}
func (x *Route) GetId() uint64 {
if x != nil {
return x.Id
}
return 0
}
func (x *Route) GetNode() *Node {
if x != nil {
return x.Node
}
return nil
}
func (x *Route) GetPrefix() string {
if x != nil {
return x.Prefix
}
return ""
}
func (x *Route) GetAdvertised() bool {
if x != nil {
return x.Advertised
}
return false
}
func (x *Route) GetEnabled() bool {
if x != nil {
return x.Enabled
}
return false
}
func (x *Route) GetIsPrimary() bool {
if x != nil {
return x.IsPrimary
}
return false
}
func (x *Route) GetCreatedAt() *timestamppb.Timestamp {
if x != nil {
return x.CreatedAt
}
return nil
}
func (x *Route) GetUpdatedAt() *timestamppb.Timestamp {
if x != nil {
return x.UpdatedAt
}
return nil
}
func (x *Route) GetDeletedAt() *timestamppb.Timestamp {
if x != nil {
return x.DeletedAt
}
return nil
}
type GetRoutesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *GetRoutesRequest) Reset() {
*x = GetRoutesRequest{}
mi := &file_headscale_v1_routes_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetRoutesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetRoutesRequest) ProtoMessage() {}
func (x *GetRoutesRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetRoutesRequest.ProtoReflect.Descriptor instead.
func (*GetRoutesRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{1}
}
type GetRoutesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
}
func (x *GetRoutesResponse) Reset() {
*x = GetRoutesResponse{}
mi := &file_headscale_v1_routes_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetRoutesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetRoutesResponse) ProtoMessage() {}
func (x *GetRoutesResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetRoutesResponse.ProtoReflect.Descriptor instead.
func (*GetRoutesResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{2}
}
func (x *GetRoutesResponse) GetRoutes() []*Route {
if x != nil {
return x.Routes
}
return nil
}
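The getters generated above make it straightforward to post-process a GetRoutesResponse. A small hypothetical helper, written as if in the generated package:

// enabledPrefixes collects the prefixes of routes that are both advertised
// by a node and enabled on the control server.
func enabledPrefixes(resp *GetRoutesResponse) []string {
	var prefixes []string
	for _, r := range resp.GetRoutes() {
		if r.GetAdvertised() && r.GetEnabled() {
			prefixes = append(prefixes, r.GetPrefix())
		}
	}
	return prefixes
}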
type EnableRouteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
}
func (x *EnableRouteRequest) Reset() {
*x = EnableRouteRequest{}
mi := &file_headscale_v1_routes_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EnableRouteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EnableRouteRequest) ProtoMessage() {}
func (x *EnableRouteRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[3]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EnableRouteRequest.ProtoReflect.Descriptor instead.
func (*EnableRouteRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{3}
}
func (x *EnableRouteRequest) GetRouteId() uint64 {
if x != nil {
return x.RouteId
}
return 0
}
type EnableRouteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *EnableRouteResponse) Reset() {
*x = EnableRouteResponse{}
mi := &file_headscale_v1_routes_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *EnableRouteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*EnableRouteResponse) ProtoMessage() {}
func (x *EnableRouteResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[4]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use EnableRouteResponse.ProtoReflect.Descriptor instead.
func (*EnableRouteResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{4}
}
type DisableRouteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
}
func (x *DisableRouteRequest) Reset() {
*x = DisableRouteRequest{}
mi := &file_headscale_v1_routes_proto_msgTypes[5]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DisableRouteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DisableRouteRequest) ProtoMessage() {}
func (x *DisableRouteRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[5]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DisableRouteRequest.ProtoReflect.Descriptor instead.
func (*DisableRouteRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{5}
}
func (x *DisableRouteRequest) GetRouteId() uint64 {
if x != nil {
return x.RouteId
}
return 0
}
type DisableRouteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DisableRouteResponse) Reset() {
*x = DisableRouteResponse{}
mi := &file_headscale_v1_routes_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DisableRouteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DisableRouteResponse) ProtoMessage() {}
func (x *DisableRouteResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[6]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DisableRouteResponse.ProtoReflect.Descriptor instead.
func (*DisableRouteResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{6}
}
type GetNodeRoutesRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
NodeId uint64 `protobuf:"varint,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
}
func (x *GetNodeRoutesRequest) Reset() {
*x = GetNodeRoutesRequest{}
mi := &file_headscale_v1_routes_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetNodeRoutesRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetNodeRoutesRequest) ProtoMessage() {}
func (x *GetNodeRoutesRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[7]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetNodeRoutesRequest.ProtoReflect.Descriptor instead.
func (*GetNodeRoutesRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{7}
}
func (x *GetNodeRoutesRequest) GetNodeId() uint64 {
if x != nil {
return x.NodeId
}
return 0
}
type GetNodeRoutesResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Routes []*Route `protobuf:"bytes,1,rep,name=routes,proto3" json:"routes,omitempty"`
}
func (x *GetNodeRoutesResponse) Reset() {
*x = GetNodeRoutesResponse{}
mi := &file_headscale_v1_routes_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *GetNodeRoutesResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetNodeRoutesResponse) ProtoMessage() {}
func (x *GetNodeRoutesResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[8]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetNodeRoutesResponse.ProtoReflect.Descriptor instead.
func (*GetNodeRoutesResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{8}
}
func (x *GetNodeRoutesResponse) GetRoutes() []*Route {
if x != nil {
return x.Routes
}
return nil
}
type DeleteRouteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
RouteId uint64 `protobuf:"varint,1,opt,name=route_id,json=routeId,proto3" json:"route_id,omitempty"`
}
func (x *DeleteRouteRequest) Reset() {
*x = DeleteRouteRequest{}
mi := &file_headscale_v1_routes_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteRouteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteRouteRequest) ProtoMessage() {}
func (x *DeleteRouteRequest) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[9]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteRouteRequest.ProtoReflect.Descriptor instead.
func (*DeleteRouteRequest) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{9}
}
func (x *DeleteRouteRequest) GetRouteId() uint64 {
if x != nil {
return x.RouteId
}
return 0
}
type DeleteRouteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DeleteRouteResponse) Reset() {
*x = DeleteRouteResponse{}
mi := &file_headscale_v1_routes_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *DeleteRouteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DeleteRouteResponse) ProtoMessage() {}
func (x *DeleteRouteResponse) ProtoReflect() protoreflect.Message {
mi := &file_headscale_v1_routes_proto_msgTypes[10]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DeleteRouteResponse.ProtoReflect.Descriptor instead.
func (*DeleteRouteResponse) Descriptor() ([]byte, []int) {
return file_headscale_v1_routes_proto_rawDescGZIP(), []int{10}
}
var File_headscale_v1_routes_proto protoreflect.FileDescriptor
var file_headscale_v1_routes_proto_rawDesc = []byte{
0x0a, 0x19, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72,
0x6f, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61,
0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x68, 0x65, 0x61, 0x64,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x22, 0xe1, 0x02, 0x0a, 0x05, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x0e, 0x0a,
0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x26, 0x0a,
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65,
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52,
0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1e, 0x0a,
0x0a, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28,
0x08, 0x52, 0x0a, 0x61, 0x64, 0x76, 0x65, 0x72, 0x74, 0x69, 0x73, 0x65, 0x64, 0x12, 0x18, 0x0a,
0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x70, 0x72,
0x69, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x50,
0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41,
0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18,
0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a,
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x64, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x12, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x6f,
0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x40, 0x0a, 0x11, 0x47,
0x65, 0x74, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e,
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a,
0x12, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15,
0x0a, 0x13, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65,
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08,
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x69, 0x73, 0x61, 0x62,
0x6c, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x2f, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64,
0x22, 0x44, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x6f, 0x75, 0x74, 0x65,
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x72, 0x6f, 0x75,
0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x68, 0x65, 0x61, 0x64,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x06,
0x72, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x22, 0x2f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08,
0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07,
0x72, 0x6f, 0x75, 0x74, 0x65, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74,
0x65, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x29,
0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61,
0x6e, 0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f,
0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (
file_headscale_v1_routes_proto_rawDescOnce sync.Once
file_headscale_v1_routes_proto_rawDescData = file_headscale_v1_routes_proto_rawDesc
)
func file_headscale_v1_routes_proto_rawDescGZIP() []byte {
file_headscale_v1_routes_proto_rawDescOnce.Do(func() {
file_headscale_v1_routes_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_routes_proto_rawDescData)
})
return file_headscale_v1_routes_proto_rawDescData
}
var file_headscale_v1_routes_proto_msgTypes = make([]protoimpl.MessageInfo, 11)
var file_headscale_v1_routes_proto_goTypes = []any{
(*Route)(nil), // 0: headscale.v1.Route
(*GetRoutesRequest)(nil), // 1: headscale.v1.GetRoutesRequest
(*GetRoutesResponse)(nil), // 2: headscale.v1.GetRoutesResponse
(*EnableRouteRequest)(nil), // 3: headscale.v1.EnableRouteRequest
(*EnableRouteResponse)(nil), // 4: headscale.v1.EnableRouteResponse
(*DisableRouteRequest)(nil), // 5: headscale.v1.DisableRouteRequest
(*DisableRouteResponse)(nil), // 6: headscale.v1.DisableRouteResponse
(*GetNodeRoutesRequest)(nil), // 7: headscale.v1.GetNodeRoutesRequest
(*GetNodeRoutesResponse)(nil), // 8: headscale.v1.GetNodeRoutesResponse
(*DeleteRouteRequest)(nil), // 9: headscale.v1.DeleteRouteRequest
(*DeleteRouteResponse)(nil), // 10: headscale.v1.DeleteRouteResponse
(*Node)(nil), // 11: headscale.v1.Node
(*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp
}
var file_headscale_v1_routes_proto_depIdxs = []int32{
11, // 0: headscale.v1.Route.node:type_name -> headscale.v1.Node
12, // 1: headscale.v1.Route.created_at:type_name -> google.protobuf.Timestamp
12, // 2: headscale.v1.Route.updated_at:type_name -> google.protobuf.Timestamp
12, // 3: headscale.v1.Route.deleted_at:type_name -> google.protobuf.Timestamp
0, // 4: headscale.v1.GetRoutesResponse.routes:type_name -> headscale.v1.Route
0, // 5: headscale.v1.GetNodeRoutesResponse.routes:type_name -> headscale.v1.Route
6, // [6:6] is the sub-list for method output_type
6, // [6:6] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_headscale_v1_routes_proto_init() }
func file_headscale_v1_routes_proto_init() {
if File_headscale_v1_routes_proto != nil {
return
}
file_headscale_v1_node_proto_init()
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_headscale_v1_routes_proto_rawDesc,
NumEnums: 0,
NumMessages: 11,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_headscale_v1_routes_proto_goTypes,
DependencyIndexes: file_headscale_v1_routes_proto_depIdxs,
MessageInfos: file_headscale_v1_routes_proto_msgTypes,
}.Build()
File_headscale_v1_routes_proto = out.File
file_headscale_v1_routes_proto_rawDesc = nil
file_headscale_v1_routes_proto_goTypes = nil
file_headscale_v1_routes_proto_depIdxs = nil
}
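The regenerated routes bindings above are what the CLI and other API consumers link against. As a rough illustration only — the import path, the gRPC address, plaintext transport and the bearer-token metadata below are assumptions, not taken from this diff — listing routes through the generated client could look like:

// Minimal sketch of calling the routes service via the generated client.
// Import path, address and auth scheme are assumptions, not from this diff.
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1" // assumed import path
)

func main() {
	// Assumed headscale gRPC listen address; no TLS for brevity.
	conn, err := grpc.NewClient("127.0.0.1:50443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := v1.NewHeadscaleServiceClient(conn)

	// Placeholder API key sent as bearer-token metadata.
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"authorization", "Bearer <api-key>")

	resp, err := client.GetRoutes(ctx, &v1.GetRoutesRequest{})
	if err != nil {
		log.Fatalf("GetRoutes: %v", err)
	}
	for _, route := range resp.GetRoutes() {
		fmt.Printf("route %d: %s (enabled=%v)\n",
			route.GetId(), route.GetPrefix(), route.GetEnabled())
	}
}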

View File

@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc-gen-go v1.35.2
// protoc (unknown)
// source: headscale/v1/user.proto
@@ -12,7 +12,6 @@ import (
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -23,7 +22,10 @@ const (
)
type User struct {
state protoimpl.MessageState `protogen:"open.v1"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
@@ -32,8 +34,6 @@ type User struct {
ProviderId string `protobuf:"bytes,6,opt,name=provider_id,json=providerId,proto3" json:"provider_id,omitempty"`
Provider string `protobuf:"bytes,7,opt,name=provider,proto3" json:"provider,omitempty"`
ProfilePicUrl string `protobuf:"bytes,8,opt,name=profile_pic_url,json=profilePicUrl,proto3" json:"profile_pic_url,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *User) Reset() {
@@ -123,13 +123,14 @@ func (x *User) GetProfilePicUrl() string {
}
type CreateUserRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
PictureUrl string `protobuf:"bytes,4,opt,name=picture_url,json=pictureUrl,proto3" json:"picture_url,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
PictureUrl string `protobuf:"bytes,4,opt,name=picture_url,json=pictureUrl,proto3" json:"picture_url,omitempty"`
}
func (x *CreateUserRequest) Reset() {
@@ -191,10 +192,11 @@ func (x *CreateUserRequest) GetPictureUrl() string {
}
type CreateUserResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
}
func (x *CreateUserResponse) Reset() {
@@ -235,11 +237,12 @@ func (x *CreateUserResponse) GetUser() *User {
}
type RenameUserRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
OldId uint64 `protobuf:"varint,1,opt,name=old_id,json=oldId,proto3" json:"old_id,omitempty"`
NewName string `protobuf:"bytes,2,opt,name=new_name,json=newName,proto3" json:"new_name,omitempty"`
}
func (x *RenameUserRequest) Reset() {
@@ -287,10 +290,11 @@ func (x *RenameUserRequest) GetNewName() string {
}
type RenameUserResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
User *User `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
}
func (x *RenameUserResponse) Reset() {
@@ -331,10 +335,11 @@ func (x *RenameUserResponse) GetUser() *User {
}
type DeleteUserRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *DeleteUserRequest) Reset() {
@@ -375,9 +380,9 @@ func (x *DeleteUserRequest) GetId() uint64 {
}
type DeleteUserResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
}
func (x *DeleteUserResponse) Reset() {
@@ -411,12 +416,13 @@ func (*DeleteUserResponse) Descriptor() ([]byte, []int) {
}
type ListUsersRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
}
func (x *ListUsersRequest) Reset() {
@@ -471,10 +477,11 @@ func (x *ListUsersRequest) GetEmail() string {
}
type ListUsersResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Users []*User `protobuf:"bytes,1,rep,name=users,proto3" json:"users,omitempty"`
}
func (x *ListUsersResponse) Reset() {
@@ -516,51 +523,74 @@ func (x *ListUsersResponse) GetUsers() []*User {
var File_headscale_v1_user_proto protoreflect.FileDescriptor
const file_headscale_v1_user_proto_rawDesc = "" +
"\n" +
"\x17headscale/v1/user.proto\x12\fheadscale.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\x83\x02\n" +
"\x04User\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\x129\n" +
"\n" +
"created_at\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12!\n" +
"\fdisplay_name\x18\x04 \x01(\tR\vdisplayName\x12\x14\n" +
"\x05email\x18\x05 \x01(\tR\x05email\x12\x1f\n" +
"\vprovider_id\x18\x06 \x01(\tR\n" +
"providerId\x12\x1a\n" +
"\bprovider\x18\a \x01(\tR\bprovider\x12&\n" +
"\x0fprofile_pic_url\x18\b \x01(\tR\rprofilePicUrl\"\x81\x01\n" +
"\x11CreateUserRequest\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12!\n" +
"\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12\x14\n" +
"\x05email\x18\x03 \x01(\tR\x05email\x12\x1f\n" +
"\vpicture_url\x18\x04 \x01(\tR\n" +
"pictureUrl\"<\n" +
"\x12CreateUserResponse\x12&\n" +
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"E\n" +
"\x11RenameUserRequest\x12\x15\n" +
"\x06old_id\x18\x01 \x01(\x04R\x05oldId\x12\x19\n" +
"\bnew_name\x18\x02 \x01(\tR\anewName\"<\n" +
"\x12RenameUserResponse\x12&\n" +
"\x04user\x18\x01 \x01(\v2\x12.headscale.v1.UserR\x04user\"#\n" +
"\x11DeleteUserRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\"\x14\n" +
"\x12DeleteUserResponse\"L\n" +
"\x10ListUsersRequest\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x12\n" +
"\x04name\x18\x02 \x01(\tR\x04name\x12\x14\n" +
"\x05email\x18\x03 \x01(\tR\x05email\"=\n" +
"\x11ListUsersResponse\x12(\n" +
"\x05users\x18\x01 \x03(\v2\x12.headscale.v1.UserR\x05usersB)Z'github.com/juanfont/headscale/gen/go/v1b\x06proto3"
var file_headscale_v1_user_proto_rawDesc = []byte{
0x0a, 0x17, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x75,
0x73, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x73,
0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x83, 0x02, 0x0a, 0x04, 0x55, 0x73, 0x65,
0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69,
0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74,
0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e,
0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f,
0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x5f, 0x70, 0x69, 0x63, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0d, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x50, 0x69, 0x63, 0x55, 0x72, 0x6c, 0x22, 0x81,
0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70,
0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65,
0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69,
0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x63, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x75, 0x72, 0x6c,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x69, 0x63, 0x74, 0x75, 0x72, 0x65, 0x55,
0x72, 0x6c, 0x22, 0x3c, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61,
0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72,
0x22, 0x45, 0x0a, 0x11, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18,
0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08,
0x6e, 0x65, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x6e, 0x65, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x3c, 0x0a, 0x12, 0x52, 0x65, 0x6e, 0x61, 0x6d,
0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a,
0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65,
0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52,
0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x23, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55,
0x73, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65,
0x6c, 0x65, 0x74, 0x65, 0x55, 0x73, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x22, 0x4c, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71,
0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04,
0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69,
0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x3d,
0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x73, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x12, 0x2e, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2e, 0x76,
0x31, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x52, 0x05, 0x75, 0x73, 0x65, 0x72, 0x73, 0x42, 0x29, 0x5a,
0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6a, 0x75, 0x61, 0x6e,
0x66, 0x6f, 0x6e, 0x74, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x2f, 0x67,
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_headscale_v1_user_proto_rawDescOnce sync.Once
file_headscale_v1_user_proto_rawDescData []byte
file_headscale_v1_user_proto_rawDescData = file_headscale_v1_user_proto_rawDesc
)
func file_headscale_v1_user_proto_rawDescGZIP() []byte {
file_headscale_v1_user_proto_rawDescOnce.Do(func() {
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)))
file_headscale_v1_user_proto_rawDescData = protoimpl.X.CompressGZIP(file_headscale_v1_user_proto_rawDescData)
})
return file_headscale_v1_user_proto_rawDescData
}
@@ -599,7 +629,7 @@ func file_headscale_v1_user_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_headscale_v1_user_proto_rawDesc), len(file_headscale_v1_user_proto_rawDesc)),
RawDescriptor: file_headscale_v1_user_proto_rawDesc,
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -610,6 +640,7 @@ func file_headscale_v1_user_proto_init() {
MessageInfos: file_headscale_v1_user_proto_msgTypes,
}.Build()
File_headscale_v1_user_proto = out.File
file_headscale_v1_user_proto_rawDesc = nil
file_headscale_v1_user_proto_goTypes = nil
file_headscale_v1_user_proto_depIdxs = nil
}

View File

@@ -320,45 +320,6 @@
]
}
},
"/api/v1/node/{nodeId}/approve_routes": {
"post": {
"operationId": "HeadscaleService_SetApprovedRoutes",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1SetApprovedRoutesResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "nodeId",
"in": "path",
"required": true,
"type": "string",
"format": "uint64"
},
{
"name": "body",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/HeadscaleServiceSetApprovedRoutesBody"
}
}
],
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/node/{nodeId}/expire": {
"post": {
"operationId": "HeadscaleService_ExpireNode",
@@ -427,6 +388,37 @@
]
}
},
"/api/v1/node/{nodeId}/routes": {
"get": {
"operationId": "HeadscaleService_GetNodeRoutes",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1GetNodeRoutesResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "nodeId",
"in": "path",
"required": true,
"type": "string",
"format": "uint64"
}
],
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/node/{nodeId}/tags": {
"post": {
"operationId": "HeadscaleService_SetTags",
@@ -580,8 +572,7 @@
"name": "user",
"in": "query",
"required": false,
"type": "string",
"format": "uint64"
"type": "string"
}
],
"tags": [
@@ -652,6 +643,122 @@
]
}
},
"/api/v1/routes": {
"get": {
"summary": "--- Route start ---",
"operationId": "HeadscaleService_GetRoutes",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1GetRoutesResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/routes/{routeId}": {
"delete": {
"operationId": "HeadscaleService_DeleteRoute",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1DeleteRouteResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "routeId",
"in": "path",
"required": true,
"type": "string",
"format": "uint64"
}
],
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/routes/{routeId}/disable": {
"post": {
"operationId": "HeadscaleService_DisableRoute",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1DisableRouteResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "routeId",
"in": "path",
"required": true,
"type": "string",
"format": "uint64"
}
],
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/routes/{routeId}/enable": {
"post": {
"operationId": "HeadscaleService_EnableRoute",
"responses": {
"200": {
"description": "A successful response.",
"schema": {
"$ref": "#/definitions/v1EnableRouteResponse"
}
},
"default": {
"description": "An unexpected error response.",
"schema": {
"$ref": "#/definitions/rpcStatus"
}
}
},
"parameters": [
{
"name": "routeId",
"in": "path",
"required": true,
"type": "string",
"format": "uint64"
}
],
"tags": [
"HeadscaleService"
]
}
},
"/api/v1/user": {
"get": {
"operationId": "HeadscaleService_ListUsers",
@@ -800,19 +907,7 @@
"type": "object",
"properties": {
"user": {
"type": "string",
"format": "uint64"
}
}
},
"HeadscaleServiceSetApprovedRoutesBody": {
"type": "object",
"properties": {
"routes": {
"type": "array",
"items": {
"type": "string"
}
"type": "string"
}
}
},
@@ -911,8 +1006,7 @@
"type": "object",
"properties": {
"user": {
"type": "string",
"format": "uint64"
"type": "string"
},
"reusable": {
"type": "boolean"
@@ -999,9 +1093,18 @@
"v1DeleteNodeResponse": {
"type": "object"
},
"v1DeleteRouteResponse": {
"type": "object"
},
"v1DeleteUserResponse": {
"type": "object"
},
"v1DisableRouteResponse": {
"type": "object"
},
"v1EnableRouteResponse": {
"type": "object"
},
"v1ExpireApiKeyRequest": {
"type": "object",
"properties": {
@@ -1025,8 +1128,7 @@
"type": "object",
"properties": {
"user": {
"type": "string",
"format": "uint64"
"type": "string"
},
"key": {
"type": "string"
@@ -1044,6 +1146,18 @@
}
}
},
"v1GetNodeRoutesResponse": {
"type": "object",
"properties": {
"routes": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/v1Route"
}
}
}
},
"v1GetPolicyResponse": {
"type": "object",
"properties": {
@@ -1056,6 +1170,18 @@
}
}
},
"v1GetRoutesResponse": {
"type": "object",
"properties": {
"routes": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/v1Route"
}
}
}
},
"v1ListApiKeysResponse": {
"type": "object",
"properties": {
@@ -1181,24 +1307,6 @@
},
"online": {
"type": "boolean"
},
"approvedRoutes": {
"type": "array",
"items": {
"type": "string"
}
},
"availableRoutes": {
"type": "array",
"items": {
"type": "string"
}
},
"subnetRoutes": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
@@ -1206,11 +1314,10 @@
"type": "object",
"properties": {
"user": {
"$ref": "#/definitions/v1User"
"type": "string"
},
"id": {
"type": "string",
"format": "uint64"
"type": "string"
},
"key": {
"type": "string"
@@ -1274,11 +1381,39 @@
}
}
},
"v1SetApprovedRoutesResponse": {
"v1Route": {
"type": "object",
"properties": {
"id": {
"type": "string",
"format": "uint64"
},
"node": {
"$ref": "#/definitions/v1Node"
},
"prefix": {
"type": "string"
},
"advertised": {
"type": "boolean"
},
"enabled": {
"type": "boolean"
},
"isPrimary": {
"type": "boolean"
},
"createdAt": {
"type": "string",
"format": "date-time"
},
"updatedAt": {
"type": "string",
"format": "date-time"
},
"deletedAt": {
"type": "string",
"format": "date-time"
}
}
},
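The same route operations re-appear on the REST gateway above (for example /api/v1/routes/{routeId}/enable and /disable). A minimal Go sketch of hitting one of those paths — the server URL, route ID and API key are placeholder assumptions, and bearer-token authentication is assumed to match the gateway setup:

// Minimal sketch of enabling a route over the REST API.
// URL, route ID and API key are placeholders, not from this diff.
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Assumed server URL and route ID.
	url := "https://headscale.example.com/api/v1/routes/1/enable"

	req, err := http.NewRequest(http.MethodPost, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer <api-key>") // placeholder API key

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}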

View File

@@ -0,0 +1,44 @@
{
"swagger": "2.0",
"info": {
"title": "headscale/v1/routes.proto",
"version": "version not set"
},
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"paths": {},
"definitions": {
"protobufAny": {
"type": "object",
"properties": {
"@type": {
"type": "string"
}
},
"additionalProperties": {}
},
"rpcStatus": {
"type": "object",
"properties": {
"code": {
"type": "integer",
"format": "int32"
},
"message": {
"type": "string"
},
"details": {
"type": "array",
"items": {
"type": "object",
"$ref": "#/definitions/protobufAny"
}
}
}
}
}
}

208
go.mod
View File

@@ -1,61 +1,56 @@
module github.com/juanfont/headscale
go 1.24.0
toolchain go1.24.2
go 1.23.1
require (
github.com/AlecAivazis/survey/v2 v2.3.7
github.com/arl/statsviz v0.6.0
github.com/cenkalti/backoff/v5 v5.0.2
github.com/cenkalti/backoff/v4 v4.3.0
github.com/chasefleming/elem-go v0.30.0
github.com/coder/websocket v1.8.13
github.com/coreos/go-oidc/v3 v3.14.1
github.com/creachadair/command v0.1.22
github.com/creachadair/flax v0.0.5
github.com/coder/websocket v1.8.12
github.com/coreos/go-oidc/v3 v3.11.0
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/docker/docker v28.2.2+incompatible
github.com/fsnotify/fsnotify v1.9.0
github.com/fsnotify/fsnotify v1.8.0
github.com/glebarez/sqlite v1.11.0
github.com/go-gormigrate/gormigrate/v2 v2.1.4
github.com/gofrs/uuid/v5 v5.3.2
github.com/google/go-cmp v0.7.0
github.com/go-gormigrate/gormigrate/v2 v2.1.3
github.com/gofrs/uuid/v5 v5.3.0
github.com/google/go-cmp v0.6.0
github.com/gorilla/mux v1.8.1
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0
github.com/jagottsicher/termcolor v1.0.2
github.com/klauspost/compress v1.18.0
github.com/klauspost/compress v1.17.11
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25
github.com/ory/dockertest/v3 v3.12.0
github.com/ory/dockertest/v3 v3.11.0
github.com/philip-bui/grpc-zerolog v1.0.1
github.com/pkg/profile v1.7.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/common v0.65.0
github.com/pterm/pterm v0.12.81
github.com/puzpuzpuz/xsync/v4 v4.1.0
github.com/rs/zerolog v1.34.0
github.com/samber/lo v1.51.0
github.com/prometheus/client_golang v1.20.5
github.com/prometheus/common v0.61.0
github.com/pterm/pterm v0.12.80
github.com/puzpuzpuz/xsync/v3 v3.4.0
github.com/rs/zerolog v1.33.0
github.com/samber/lo v1.47.0
github.com/sasha-s/go-deadlock v0.3.5
github.com/spf13/cobra v1.9.1
github.com/spf13/viper v1.20.1
github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.20.0-alpha.6
github.com/stretchr/testify v1.10.0
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b
github.com/tailscale/tailsql v0.0.0-20241211062219-bf96884c6a49
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba
golang.org/x/crypto v0.39.0
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
golang.org/x/net v0.41.0
golang.org/x/oauth2 v0.30.0
golang.org/x/sync v0.15.0
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822
google.golang.org/grpc v1.73.0
google.golang.org/protobuf v1.36.6
golang.org/x/crypto v0.32.0
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
golang.org/x/net v0.34.0
golang.org/x/oauth2 v0.25.0
golang.org/x/sync v0.10.0
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484
google.golang.org/grpc v1.69.0
google.golang.org/protobuf v1.36.0
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v3 v3.0.1
gorm.io/driver/postgres v1.6.0
gorm.io/gorm v1.30.0
tailscale.com v1.84.2
zgo.at/zcache/v2 v2.2.0
gorm.io/driver/postgres v1.5.11
gorm.io/gorm v1.25.12
tailscale.com v1.80.0
zgo.at/zcache/v2 v2.1.0
zombiezen.com/go/postgrestest v1.0.1
)
@@ -77,10 +72,10 @@ require (
// together, e.g:
// go get modernc.org/libc@v1.55.3 modernc.org/sqlite@v1.33.1
require (
modernc.org/libc v1.62.1 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.10.0 // indirect
modernc.org/sqlite v1.37.0 // indirect
modernc.org/libc v1.55.3 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.8.0 // indirect
modernc.org/sqlite v1.34.5 // indirect
)
require (
@@ -89,87 +84,83 @@ require (
atomicgo.dev/schedule v0.1.0 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
github.com/akutz/memconn v0.1.0 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/aws/aws-sdk-go-v2 v1.36.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.5 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 // indirect
github.com/aws/aws-sdk-go-v2 v1.26.1 // indirect
github.com/aws/aws-sdk-go-v2/config v1.27.11 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
github.com/aws/smithy-go v1.20.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/console v1.0.5 // indirect
github.com/containerd/console v1.0.4 // indirect
github.com/containerd/continuity v0.4.5 // indirect
github.com/containerd/errdefs v0.3.0 // indirect
github.com/containerd/errdefs/pkg v0.3.0 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
github.com/creachadair/mds v0.24.1 // indirect
github.com/creachadair/mds v0.20.0 // indirect
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 // indirect
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/cli v28.1.1+incompatible // indirect
github.com/docker/cli v27.4.1+incompatible // indirect
github.com/docker/docker v27.4.1+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.5 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gaissmai/bart v0.18.0 // indirect
github.com/gaissmai/bart v0.11.1 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-github v17.0.0+incompatible // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gookit/color v1.5.4 // indirect
github.com/gorilla/csrf v1.7.3 // indirect
github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 // indirect
github.com/gorilla/securecookie v1.1.2 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/illarion/gonotify/v3 v3.0.2 // indirect
github.com/illarion/gonotify/v2 v2.0.3 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.7.4 // indirect
github.com/jackc/pgx/v5 v5.7.1 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jsimonetti/rtnetlink v1.4.1 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/lithammer/fuzzysearch v1.1.8 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mdlayher/genetlink v1.3.2 // indirect
@@ -180,65 +171,60 @@ require (
github.com/miekg/dns v1.1.58 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/sys/atomicwriter v0.1.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/term v0.5.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/opencontainers/runc v1.3.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/opencontainers/runc v1.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus-community/pro-bing v0.4.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rogpeppe/go-internal v1.14.1 // indirect
github.com/rogpeppe/go-internal v1.13.1 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/sagikazarmark/locafero v0.9.0 // indirect
github.com/sagikazarmark/locafero v0.6.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.14.0 // indirect
github.com/spf13/cast v1.8.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e // indirect
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 // indirect
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 // indirect
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc // indirect
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d // indirect
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 // indirect
github.com/tailscale/setec v0.0.0-20240930150730-e6eb93658ed3 // indirect
github.com/tailscale/squibble v0.0.0-20240909231413-32a80b9743f7 // indirect
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 // indirect
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 // indirect
github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 // indirect
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/text v0.26.0 // indirect
golang.org/x/time v0.10.0 // indirect
golang.org/x/tools v0.33.0 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.29.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 // indirect
)

545
go.sum
View File

@@ -1,5 +1,3 @@
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f h1:1C7nZuxUMNz7eiQALRfiqNOm04+m3edWlRff/BYHf0Q=
9fans.net/go v0.0.8-0.20250307142834-96bdba94b63f/go.mod h1:hHyrZRryGqVdqrknjq5OWDLGCTJ2NeEvtrpR96mjraM=
atomicgo.dev/assert v0.0.2 h1:FiKeMiZSgRrZsPo9qn/7vmr7mCsh5SZyXY4YGYiYwrg=
atomicgo.dev/assert v0.0.2/go.mod h1:ut4NcI3QDdJtlmAxQULOmA13Gz6e2DWbSAS8RUOmNYQ=
atomicgo.dev/cursor v0.2.0 h1:H6XN5alUJ52FZZUkI7AlJbUc1aW38GWZalpYRPpoPOw=
@@ -8,6 +6,7 @@ atomicgo.dev/keyboard v0.2.9 h1:tOsIid3nlPLZ3lwgG8KZMp/SFmr7P0ssEN5JUsm78K8=
atomicgo.dev/keyboard v0.2.9/go.mod h1:BC4w9g00XkxH/f1HXhW2sXmJFOCWbKn9xrOunSFtExQ=
atomicgo.dev/schedule v0.1.0 h1:nTthAbhZS5YZmgYbb2+DH8uQIZcTlIrd4eYr3UQxEjs=
atomicgo.dev/schedule v0.1.0/go.mod h1:xeUa3oAkiuHYh8bKiQBRojqAMq3PXXbJujjb0hw8pEU=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
@@ -16,8 +15,9 @@ filippo.io/mkcert v1.4.4 h1:8eVbbwfVlaqUM7OwuftKc2nuYOoTDQWqsoXmzoXZdbc=
filippo.io/mkcert v1.4.4/go.mod h1:VyvOchVuAye3BoUsPUOOofKygVwLV2KQMVFJNRq+1dA=
github.com/AlecAivazis/survey/v2 v2.3.7 h1:6I/u8FvytdGsgonrYsVn2t8t4QiRnh6QSTqkkhIiSjQ=
github.com/AlecAivazis/survey/v2 v2.3.7/go.mod h1:xUTIdE4KCOIjsBAE1JYsUPoCqYdZ1reCfTwbto0Fduo=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/MarvinJWendt/testza v0.1.0/go.mod h1:7AxNvlfeHP7Z/hDQ5JtE3OKYT3XFUeLCDE2DQninSqs=
@@ -41,53 +41,53 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/arl/statsviz v0.6.0 h1:jbW1QJkEYQkufd//4NDYRSNBpwJNrdzPahF7ZmoGdyE=
github.com/arl/statsviz v0.6.0/go.mod h1:0toboo+YGSUXDaS4g1D5TVS4dXs7S7YYT5J/qnW2h8s=
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk=
github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k=
github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y=
github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31 h1:8IwBjuLdqIO1dGB+dZ9zJEl8wzY3bVYxcs0Xyu/Lsc0=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.31/go.mod h1:8tMBcuVjL4kP/ECEIWTCWtwV2kj6+ouEKl4cqR4iWLw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5 h1:siiQ+jummya9OLPDEyHVb2dLW4aOMe22FGDd0sAfuSw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.5/go.mod h1:iHVx2J9pWzITdP5MJY6qWfG34TfD9EA+Qi3eV6qQCXw=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo=
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA=
github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA=
github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg=
github.com/aws/aws-sdk-go-v2/config v1.27.11 h1:f47rANd2LQEYHda2ddSCKYId18/8BhSRM4BULGmfgNA=
github.com/aws/aws-sdk-go-v2/config v1.27.11/go.mod h1:SMsV78RIOYdve1vf36z8LmnszlRWkwMQtomCAI0/mIE=
github.com/aws/aws-sdk-go-v2/credentials v1.17.11 h1:YuIB1dJNf1Re822rriUOTxopaHHvIq0l/pX3fwO+Tzs=
github.com/aws/aws-sdk-go-v2/credentials v1.17.11/go.mod h1:AQtFPsDH9bI2O+71anW6EKL+NcD7LG3dpKGMV4SShgo=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5 h1:81KE7vaZzrl7yHBYHVEzYB8sypz11NMOZ40YlWvPxsU=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.5/go.mod h1:LIt2rg7Mcgn09Ygbdh/RdIm0rQ+3BNkbP1gyVMFtRK0=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7 h1:ZMeFZ5yk+Ek+jNr1+uwCd2tG89t6oTS5yVWpa6yy2es=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.7/go.mod h1:mxV05U+4JiHqIpGqqYXOHLPKUC6bDXC44bsUhNjOEwY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5 h1:f9RyWNtS8oH7cZlbn+/JNPpjUk5+5fLd5lM9M0i49Ys=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.5/go.mod h1:h5CoMZV2VF297/VLhRhO1WF+XYWOzXo+4HsObA4HjBQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1 h1:6cnno47Me9bRykw9AEv9zkXE+5or7jz8TsskTTccbgc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.53.1/go.mod h1:qmdkIIAC+GCLASF7R2whgNrJADz0QZPX+Seiw/i4S3o=
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0 h1:IOdss+igJDFdic9w3WKwxGCmHqUxydvIhJOm9LJ32Dk=
github.com/aws/aws-sdk-go-v2/service/ssm v1.45.0/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5 h1:vN8hEbpRnL7+Hopy9dzmRle1xmDc7o8tmY0klsr175w=
github.com/aws/aws-sdk-go-v2/service/sso v1.20.5/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU=
github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw=
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8=
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chasefleming/elem-go v0.30.0 h1:BlhV1ekv1RbFiM8XZUQeln1Ikb4D+bu2eDO4agREvok=
@@ -101,35 +101,25 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/cilium/ebpf v0.17.3 h1:FnP4r16PWYSE4ux6zN+//jMcW4nMVRvuTLVTvCjyyjg=
github.com/cilium/ebpf v0.17.3/go.mod h1:G5EDHij8yiLzaqn0WjyfJHvRa+3aDlReIaLVRMvOyJk=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk=
github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU=
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creachadair/command v0.1.22 h1:WmdrURwZdmPD1jm13SjKooaMoqo7mW1qI2BPCShs154=
github.com/creachadair/command v0.1.22/go.mod h1:YFc+OMGucqTpxwQg/iJnNg8BMNmRPDK60rYy8ckgKwE=
github.com/creachadair/flax v0.0.5 h1:zt+CRuXQASxwQ68e9GHAOnEgAU29nF0zYMHOCrL5wzE=
github.com/creachadair/flax v0.0.5/go.mod h1:F1PML0JZLXSNDMNiRGK2yjm5f+L9QCHchyHBldFymj8=
github.com/creachadair/mds v0.24.1 h1:bzL4ItCtAUxxO9KkotP0PVzlw4tnJicAcjPu82v2mGs=
github.com/creachadair/mds v0.24.1/go.mod h1:ArfS0vPHoLV/SzuIzoqTEZfoYmac7n9Cj8XPANHocvw=
github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc=
github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creachadair/mds v0.20.0 h1:bXQO154c2TDgCY+rRmdIfUqjeqGYejmZ/QayeTNwbp8=
github.com/creachadair/mds v0.20.0/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0=
@@ -142,14 +132,12 @@ github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0 h1:vrC07UZcgPzu/Oj
github.com/dblohm7/wingoes v0.0.0-20240123200102-b75a8a7d7eb0/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q=
github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c=
github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0=
github.com/docker/cli v28.1.1+incompatible h1:eyUemzeI45DY7eDPuwUcmDyDj1pM98oD5MdSpiItp8k=
github.com/docker/cli v28.1.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/cli v27.4.1+incompatible h1:VzPiUlRJ/xh+otB75gva3r05isHMo5wXDfPRi5/b4hI=
github.com/docker/cli v27.4.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -158,34 +146,37 @@ github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
github.com/dsnet/try v0.0.3/go.mod h1:WBM8tRpUmnXXhY1U6/S8dt6UWdHTQ7y8A5YSkRCkq40=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gaissmai/bart v0.18.0 h1:jQLBT/RduJu0pv/tLwXE+xKPgtWJejbxuXAR+wLJafo=
github.com/gaissmai/bart v0.18.0/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc=
github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg=
github.com/github/fakeca v0.1.0 h1:Km/MVOFvclqxPM9dZBC4+QE564nU4gz4iZ0D9pMw28I=
github.com/github/fakeca v0.1.0/go.mod h1:+bormgoGMMuamOscx7N91aOuUST7wdaJ2rNjeohylyo=
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
github.com/go-gormigrate/gormigrate/v2 v2.1.4 h1:KOPEt27qy1cNzHfMZbp9YTmEuzkY4F4wrdsJW9WFk1U=
github.com/go-gormigrate/gormigrate/v2 v2.1.4/go.mod h1:y/6gPAH6QGAgP1UfHMiXcqGeJ88/GRQbfCReE1JJD5Y=
github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY=
github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY=
github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-gormigrate/gormigrate/v2 v2.1.3 h1:ei3Vq/rpPI/jCJY9mRHJAKg5vU+EhZyWhBAkaAomQuw=
github.com/go-gormigrate/gormigrate/v2 v2.1.3/go.mod h1:VJ9FIOBAur+NmQ8c4tDVwOuiJcgupTG105FexPFrXzA=
github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k=
github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84=
github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -194,46 +185,49 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737 h1:cf60tHxREO3g1nroKr2osU3JWZsJzkfi7rEg+oAB0Lo=
github.com/go4org/plan9netshell v0.0.0-20250324183649-788daa080737/go.mod h1:MIS0jDzbU/vuM9MC4YnBITCv+RYuTRq8dJzmCrFsK9g=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg=
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU=
github.com/gofrs/uuid/v5 v5.3.2 h1:2jfO8j3XgSwlz/wHqemAEugfnTlikAYHhnqQ8Xh4fE0=
github.com/gofrs/uuid/v5 v5.3.2/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/gofrs/uuid/v5 v5.3.0 h1:m0mUMr+oVYUdxpMLgSYCZiXe7PuVPnI94+OMeVBNedk=
github.com/gofrs/uuid/v5 v5.3.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/go-tpm v0.9.4 h1:awZRf9FwOeTunQmHoDYSHJps3ie6f1UlhS1fOdPEt1I=
github.com/google/go-tpm v0.9.4/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 h1:wG8RYIyctLhdFk6Vl1yPGtSRtwGpVkWyZww1OCil2MI=
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6zZ3oEn0JuiUQ4wqwuyqqzasOltcoXPtgLbFp4=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -242,16 +236,16 @@ github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQ
github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo=
github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0=
github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w=
github.com/gorilla/csrf v1.7.3 h1:BHWt6FTLZAb2HtWT5KDBf6qgpZzvtbp9QWDRKZMXJC0=
github.com/gorilla/csrf v1.7.3/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M=
github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA=
github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0 h1:+epNPbD5EqgpEMm5wrl4Hqts3jZt8+kYaqUisuuIGTk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.0/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
@@ -260,8 +254,8 @@ github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/illarion/gonotify/v3 v3.0.2 h1:O7S6vcopHexutmpObkeWsnzMJt/r1hONIEogeVNmJMk=
github.com/illarion/gonotify/v3 v3.0.2/go.mod h1:HWGPdPe817GfvY3w7cx6zkbzNZfi3QjcBm/wgVvEL1U=
github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A=
github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/insomniacslk/dhcp v0.0.0-20240129002554-15c9b8791914 h1:kD8PseueGeYiid/Mmcv17Q0Qqicc4F46jcX22L/e/Hs=
@@ -270,8 +264,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.4 h1:9wKznZrhWa2QiHL+NjTSPP6yjl3451BX3imWDnokYlg=
github.com/jackc/pgx/v5 v5.7.4/go.mod h1:ncY89UGWxg82EykZUwSpUKEfccBGGYq1xjrOpsbsfGQ=
github.com/jackc/pgx/v5 v5.7.1 h1:x7SYsPBYDkHDksogeSmZZ5xzThcTgRz++I5E+ePFUcs=
github.com/jackc/pgx/v5 v5.7.1/go.mod h1:e7O26IywZZ+naJtWWos6i6fvWK+29etgITqrqHLfoZA=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jagottsicher/termcolor v1.0.2 h1:fo0c51pQSuLBN1+yVX2ZE+hE+P7ULb/TY8eRowJnrsM=
@@ -293,13 +287,14 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.10/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ=
github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
@@ -322,9 +317,8 @@ github.com/lithammer/fuzzysearch v1.1.8 h1:/HIuJnjHuXS8bKaiTMeeDlW2/AyIWk2brx1V8
github.com/lithammer/fuzzysearch v1.1.8/go.mod h1:IdqeyBClc3FFqSzYq/MXESsS4S0FsZ5ajtkr5xPLts4=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -350,16 +344,10 @@ github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
@@ -370,23 +358,25 @@ github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25 h1:9bCMuD3Tc
github.com/oauth2-proxy/mockoidc v0.0.0-20240214162133-caebfff84d25/go.mod h1:eDjgYHYDJbPLBLsyZ6qRaugP0mX8vePOhZ5id1fdzJw=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runc v1.3.0 h1:cvP7xbEvD0QQAs0nZKLzkVog2OPZhI/V2w3WmTmUSXI=
github.com/opencontainers/runc v1.3.0/go.mod h1:9wbWt42gV+KRxKRVVugNP6D5+PQciRbenB4fLVsqGPs=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80=
github.com/opencontainers/runc v1.2.3/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw=
github.com/ory/dockertest/v3 v3.12.0/go.mod h1:aKNDTva3cp8dwOWwb9cWuX84aH5akkxXRvO7KCwWVjE=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/ory/dockertest/v3 v3.11.0 h1:OiHcxKAvSDUwsEVh2BjxQQc/5EHz9n0va9awCtNGuyA=
github.com/ory/dockertest/v3 v3.11.0/go.mod h1:VIPxS1gwT9NpPOrfD3rACs8Y9Z7yhzO4SB194iUDnUI=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a h1:S+AGcmAESQ0pXCUNnRH7V+bOUIgkSX5qVt2cNKCrm0Q=
github.com/petermattis/goid v0.0.0-20250319124200-ccd6737f222a/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 h1:ah1dvbqPMN5+ocrg/ZSgZ6k8bOk+kcZQ7fnyx6UvOm4=
github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philip-bui/grpc-zerolog v1.0.1 h1:EMacvLRUd2O1K0eWod27ZP5CY1iTNkhBDLSN+Q4JEvA=
github.com/philip-bui/grpc-zerolog v1.0.1/go.mod h1:qXbiq/2X4ZUMMshsqlWyTHOcw7ns+GZmlqZZN05ZHcQ=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
@@ -398,12 +388,13 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus-community/pro-bing v0.4.0 h1:YMbv+i08gQz97OZZBwLyvmmQEEzyfyrrjEaAchdy3R4=
github.com/prometheus-community/pro-bing v0.4.0/go.mod h1:b7wRYZtCcPmt4Sz319BykUU241rWLe1VFXyiyWK/dH4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/pterm/pterm v0.12.27/go.mod h1:PhQ89w4i95rhgE+xedAoqous6K9X+r6aSOI2eFF7DZI=
@@ -413,51 +404,54 @@ github.com/pterm/pterm v0.12.31/go.mod h1:32ZAWZVXD7ZfG0s8qqHXePte42kdz8ECtRyEej
github.com/pterm/pterm v0.12.33/go.mod h1:x+h2uL+n7CP/rel9+bImHD5lF3nM9vJj80k9ybiiTTE=
github.com/pterm/pterm v0.12.36/go.mod h1:NjiL09hFhT/vWjQHSj1athJpx6H8cjpHXNAK5bUw8T8=
github.com/pterm/pterm v0.12.40/go.mod h1:ffwPLwlbXxP+rxT0GsgDTzS3y3rmpAO1NMjUkGTYf8s=
github.com/pterm/pterm v0.12.81 h1:ju+j5I2++FO1jBKMmscgh5h5DPFDFMB7epEjSoKehKA=
github.com/pterm/pterm v0.12.81/go.mod h1:TyuyrPjnxfwP+ccJdBTeWHtd/e0ybQHkOS/TakajZCw=
github.com/puzpuzpuz/xsync/v4 v4.1.0 h1:x9eHRl4QhZFIPJ17yl4KKW9xLyVWbb3/Yq4SXpjF71U=
github.com/puzpuzpuz/xsync/v4 v4.1.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
github.com/pterm/pterm v0.12.80 h1:mM55B+GnKUnLMUSqhdINe4s6tOuVQIetQ3my8JGyAIg=
github.com/pterm/pterm v0.12.80/go.mod h1:c6DeF9bSnOSeFPZlfs4ZRAFcf5SCoTwvwQ5xaKGQlHo=
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=
github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k=
github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk=
github.com/samber/lo v1.51.0 h1:kysRYLbHy/MB7kQZf5DSN50JHmMsNEdeY24VzJFu7wI=
github.com/samber/lo v1.51.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=
github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA=
github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo=
github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk=
github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.0-alpha.6 h1:f65Cr/+2qk4GfHC0xqT/isoupQppwN5+VLRztUGTDbY=
github.com/spf13/viper v1.20.0-alpha.6/go.mod h1:CGBZzv0c9fOUASm6rfus4wdeIjR/04NOLq1P4KRhX3k=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -472,28 +466,28 @@ github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e h1:PtWT87weP
github.com/tailscale/certstore v0.1.1-0.20231202035212-d3fa0460f47e/go.mod h1:XrBNfAFN+pwoWuksbFS9Ccxnopa15zJGgXRFN90l3K4=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55 h1:Gzfnfk2TWrk8Jj4P4c1a3CtQyMaTVCznlkLZI++hok4=
github.com/tailscale/go-winio v0.0.0-20231025203758-c4f33415bf55/go.mod h1:4k4QO+dQ3R5FofL+SanAUZe+/QfeK0+OIuwDIRu2vSg=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869 h1:SRL6irQkKGQKKLzvQP/ke/2ZuB7Py5+XuqtOgSj+iMM=
github.com/tailscale/golang-x-crypto v0.0.0-20250404221719-a5573b049869/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4 h1:rXZGgEa+k2vJM8xT0PoSKfVXwFGPQ3z3CJfmnHJkZZw=
github.com/tailscale/golang-x-crypto v0.0.0-20240604161659-3fde5e568aa4/go.mod h1:ikbF+YT089eInTp9f2vmvy4+ZVnW5hzX1q2WknxSprQ=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPxRs2O36yuGRW3f9SYV+bMTTvMBI0EKio=
github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33 h1:idh63uw+gsG05HwjZsAENCG4KZfyvjK03bpjxa5qRRk=
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b h1:MNaGusDfB1qxEsl6iVb33Gbe777IKzPP5PDta0xGC8M=
github.com/tailscale/hujson v0.0.0-20241010212012-29efb4a0184b/go.mod h1:EbW0wDK/qEUYI0A5bqq0C2kF8JTQwWONmGDBbzsxxHo=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU=
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA=
github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d h1:mnqtPWYyvNiPU9l9tzO2YbHXU/xV664XthZYA26lOiE=
github.com/tailscale/setec v0.0.0-20250305161714-445cadbbca3d/go.mod h1:9BzmlFc3OLqLzLTF/5AY+BMs+clxMqyhSGzgXIm8mNI=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694 h1:95eIP97c88cqAFU/8nURjgI9xxPbD+Ci6mY/a79BI/w=
github.com/tailscale/squibble v0.0.0-20250108170732-a4ca58afa694/go.mod h1:veguaG8tVg1H/JG5RfpoUW41I+O8ClPElo/fTYr8mMk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97 h1:JJkDnrAhHvOCttk8z9xeZzcDlzzkRA7+Duxj9cwOyxk=
github.com/tailscale/tailsql v0.0.0-20250421235516-02f85f087b97/go.mod h1:9jS8HxwsP2fU4ESZ7DZL+fpH/U66EVlVMzdgznH12RM=
github.com/tailscale/setec v0.0.0-20240930150730-e6eb93658ed3 h1:Zk341hE1rcVUcDwA9XKmed2acHGGlbeFQzje6gvkuFo=
github.com/tailscale/setec v0.0.0-20240930150730-e6eb93658ed3/go.mod h1:nexjfRM8veJVJ5PTbqYI2YrUj/jbk3deffEHO3DH9Q4=
github.com/tailscale/squibble v0.0.0-20240909231413-32a80b9743f7 h1:nfklwaP8uNz2IbUygSKOQ1aDzzRRRLaIbPpnQWUUMGc=
github.com/tailscale/squibble v0.0.0-20240909231413-32a80b9743f7/go.mod h1:YH/J7n7jNZOq10nTxxPANv2ha/Eg47/6J5b7NnOYAhQ=
github.com/tailscale/tailsql v0.0.0-20241211062219-bf96884c6a49 h1:QFXXdoiYFiUS7a6DH7zE6Uacz3wMzH/1/VvWLnR9To4=
github.com/tailscale/tailsql v0.0.0-20241211062219-bf96884c6a49/go.mod h1:IX3F8T6iILmg94hZGkkOf6rmjIHJCXNVqxOpiSUwHQQ=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14=
github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6 h1:l10Gi6w9jxvinoiq15g8OToDdASBni4CyJOdHY1Hr8M=
github.com/tailscale/wf v0.0.0-20240214030419-6fbb0a674ee6/go.mod h1:ZXRML051h7o4OcI0d3AaILDIad/Xw0IkXaHM17dic1Y=
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251 h1:h/41LFTrwMxB9Xvvug0kRdQCU5TlV1+pAMQw0ZtDE3U=
github.com/tailscale/wireguard-go v0.0.0-20250304000100-91a0587fb251/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw=
github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek=
github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg=
github.com/tc-hib/winres v0.2.1 h1:YDE0FiP0VmtRaDn7+aaChp1KiF4owBiJa5l964l5ujA=
@@ -502,8 +496,8 @@ github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e h1:IWllFTiDjjLIf2
github.com/tcnksm/go-latest v0.0.0-20170313132115-e3007ae9052e/go.mod h1:d7u6HkTYKSv5m6MCKkOQlHwaShTMl3HjqSGW3XtVhXM=
github.com/tink-crypto/tink-go/v2 v2.1.0 h1:QXFBguwMwTIaU17EgZpEJWsUSc60b1BAGTzBIoMdmok=
github.com/tink-crypto/tink-go/v2 v2.1.0/go.mod h1:y1TnYFt1i2eZVfx4OGc+C+EMp4CoKWAw2VSEuoicHHI=
github.com/u-root/u-root v0.14.0 h1:Ka4T10EEML7dQ5XDvO9c3MBN8z4nuSnGjcd1jmU2ivg=
github.com/u-root/u-root v0.14.0/go.mod h1:hAyZorapJe4qzbLWlAkmSVCJGbfoU9Pu4jpJ1WMluqE=
github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs=
github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM=
github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
@@ -526,26 +520,22 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0 h1:dNzwXjZKpMpE2JhmO+9HsPl42NIXFIFSUSSs0fiqra0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0 h1:nRVXXvf78e00EwY6Wp0YII8ww2JVWshZ20HfTlE11AM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.36.0/go.mod h1:r49hO7CgrxY9Voaj3Xe8pANWtr0Oq916d0XAmOoCZAQ=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v1.6.0 h1:jQjP+AQyTf+Fe7OKj/MfkDrmK4MNVtw2NpXsf9fefDI=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek=
go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g=
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M=
@@ -555,20 +545,29 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8=
golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.24.0 h1:AN7zRgVsbvmTfNyqIbbOraYL8mSwcKncEj8ofjgzcMQ=
golang.org/x/image v0.24.0/go.mod h1:4b/ITuLfqYq1hqZcjofwctIhi7sZh2WaCjvsBNjjya8=
golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68=
golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -577,21 +576,26 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -602,6 +606,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -615,8 +620,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4=
golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -624,8 +629,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -633,18 +638,23 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -653,15 +663,26 @@ golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeu
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o=
google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI=
google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ=
google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -674,47 +695,49 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
gorm.io/driver/postgres v1.5.11 h1:ubBVAfbKEUld/twyKZ0IYn9rSQh448EdelLYk9Mv314=
gorm.io/driver/postgres v1.5.11/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/gorm v1.25.12 h1:I0u8i2hWQItBq1WfE0o2+WuL9+8L21K9e2HHSTE/0f8=
gorm.io/gorm v1.25.12/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k=
gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM=
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8=
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.5.1 h1:4bH5o3b5ZULQ4UrBmP+63W9r7qIkqJClEA9ko5YKx+I=
honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs=
howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
modernc.org/cc/v4 v4.25.2 h1:T2oH7sZdGvTaie0BRNFbIYsabzCxUQg8nLqCdQ2i0ic=
modernc.org/cc/v4 v4.25.2/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
modernc.org/ccgo/v4 v4.25.1 h1:TFSzPrAGmDsdnhT9X2UrcPMI3N/mJ9/X9ykKXwLhDsU=
modernc.org/ccgo/v4 v4.25.1/go.mod h1:njjuAYiPflywOOrm3B7kCB444ONP5pAVr8PIEoE0uDw=
modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ=
modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y=
modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
modernc.org/libc v1.62.1 h1:s0+fv5E3FymN8eJVmnk0llBe6rOxCu/DEU+XygRbS8s=
modernc.org/libc v1.62.1/go.mod h1:iXhATfJQLjG3NWy56a6WVU73lWOcdYVxsvwCgoPljuo=
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
modernc.org/memory v1.10.0 h1:fzumd51yQ1DxcOxSO+S6X7+QTuVU+n8/Aj7swYjFfC4=
modernc.org/memory v1.10.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw=
modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.37.0 h1:s1TMe7T3Q3ovQiK2Ouz4Jwh7dw4ZDqbebSDTlSJdfjI=
modernc.org/sqlite v1.37.0/go.mod h1:5YiWv+YviqGMuGw4V+PNplcyaJ5v+vQd7TQOgkACoJM=
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U=
modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g=
modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k=
software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
tailscale.com v1.84.2 h1:v6aM4RWUgYiV52LRAx6ET+dlGnvO/5lnqPXb7/pMnR0=
tailscale.com v1.84.2/go.mod h1:6/S63NMAhmncYT/1zIPDJkvCuZwMw+JnUuOfSPNazpo=
zgo.at/zcache/v2 v2.2.0 h1:K29/IPjMniZfveYE+IRXfrl11tMzHkIPuyGrfVZ2fGo=
zgo.at/zcache/v2 v2.2.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
tailscale.com v1.80.0 h1:7joWtDtdHEHJvGmOag10RNITKp1I4Ts7Hrn6pU33/1I=
tailscale.com v1.80.0/go.mod h1:4tasV1xjJAMHuX2xWMWAnXEmlrAA6M3w1xnc32DlpMk=
zgo.at/zcache/v2 v2.1.0 h1:USo+ubK+R4vtjw4viGzTe/zjXyPw6R7SK/RL3epBBxs=
zgo.at/zcache/v2 v2.1.0/go.mod h1:gyCeoLVo01QjDZynjime8xUGHHMbsLiPyUTBpDGd4Gk=
zombiezen.com/go/postgrestest v1.0.1 h1:aXoADQAJmZDU3+xilYVut0pHhgc0sF8ZspPW9gFNwP4=
zombiezen.com/go/postgrestest v1.0.1/go.mod h1:marlZezr+k2oSJrvXHnZUs1olHqpE9czlz8ZYkVxliQ=

View File

@@ -5,6 +5,7 @@ import (
"crypto/tls"
"errors"
"fmt"
"io"
"net"
"net/http"
_ "net/http/pprof" // nolint
@@ -19,6 +20,7 @@ import (
"github.com/davecgh/go-spew/spew"
"github.com/gorilla/mux"
grpcMiddleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpcRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/juanfont/headscale"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
@@ -29,11 +31,12 @@ import (
"github.com/juanfont/headscale/hscontrol/dns"
"github.com/juanfont/headscale/hscontrol/mapper"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
zerolog "github.com/philip-bui/grpc-zerolog"
"github.com/pkg/profile"
"github.com/prometheus/client_golang/prometheus/promhttp"
zl "github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"golang.org/x/crypto/acme"
@@ -47,11 +50,13 @@ import (
"google.golang.org/grpc/peer"
"google.golang.org/grpc/reflection"
"google.golang.org/grpc/status"
"gorm.io/gorm"
"tailscale.com/envknob"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
"tailscale.com/util/dnsname"
zcache "zgo.at/zcache/v2"
)
var (
@@ -69,22 +74,32 @@ const (
updateInterval = 5 * time.Second
privateKeyFileMode = 0o600
headscaleDirPerm = 0o700
registerCacheExpiration = time.Minute * 15
registerCacheCleanup = time.Minute * 20
)
// Headscale represents the base app of the service.
type Headscale struct {
cfg *types.Config
state *state.State
db *db.HSDatabase
ipAlloc *db.IPAllocator
noisePrivateKey *key.MachinePrivate
ephemeralGC *db.EphemeralGarbageCollector
DERPMap *tailcfg.DERPMap
DERPServer *derpServer.DERPServer
// Things that generate changes
polManOnce sync.Once
polMan policy.PolicyManager
extraRecordMan *dns.ExtraRecordsMan
mapper *mapper.Mapper
nodeNotifier *notifier.Notifier
authProvider AuthProvider
mapper *mapper.Mapper
nodeNotifier *notifier.Notifier
registrationCache *zcache.Cache[types.RegistrationID, types.RegisterNode]
authProvider AuthProvider
pollNetMapStreamWG sync.WaitGroup
}
@@ -109,42 +124,42 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
return nil, fmt.Errorf("failed to read or create Noise protocol private key: %w", err)
}
s, err := state.NewState(cfg)
if err != nil {
return nil, fmt.Errorf("init state: %w", err)
}
registrationCache := zcache.New[types.RegistrationID, types.RegisterNode](
registerCacheExpiration,
registerCacheCleanup,
)
app := Headscale{
cfg: cfg,
noisePrivateKey: noisePrivateKey,
registrationCache: registrationCache,
pollNetMapStreamWG: sync.WaitGroup{},
nodeNotifier: notifier.NewNotifier(cfg),
state: s,
}
// Initialize ephemeral garbage collector
ephemeralGC := db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
node, err := app.state.GetNodeByID(ni)
if err != nil {
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to get ephemeral node for deletion")
return
}
app.db, err = db.NewHeadscaleDatabase(
cfg.Database,
cfg.BaseDomain,
registrationCache,
)
if err != nil {
return nil, err
}
policyChanged, err := app.state.DeleteNode(node)
if err != nil {
app.ipAlloc, err = db.NewIPAllocator(app.db, cfg.PrefixV4, cfg.PrefixV6, cfg.IPAllocation)
if err != nil {
return nil, err
}
app.ephemeralGC = db.NewEphemeralGarbageCollector(func(ni types.NodeID) {
if err := app.db.DeleteEphemeralNode(ni); err != nil {
log.Err(err).Uint64("node.id", ni.Uint64()).Msgf("failed to delete ephemeral node")
return
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "ephemeral-gc-policy", node.Hostname)
app.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
log.Debug().Uint64("node.id", ni.Uint64()).Msgf("deleted ephemeral node")
})
app.ephemeralGC = ephemeralGC
if err = app.loadPolicyManager(); err != nil {
return nil, fmt.Errorf("failed to load ACL policy: %w", err)
}
var authProvider AuthProvider
authProvider = NewAuthProviderWeb(cfg.ServerURL)
@@ -155,8 +170,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
ctx,
cfg.ServerURL,
&cfg.OIDC,
app.state,
app.db,
app.nodeNotifier,
app.ipAlloc,
app.polMan,
)
if err != nil {
if cfg.OIDC.OnlyStartIfOIDCIsAvailable {
@@ -175,14 +192,10 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
var magicDNSDomains []dnsname.FQDN
if cfg.PrefixV4 != nil {
magicDNSDomains = append(
magicDNSDomains,
util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv4DNSRootDomain(*cfg.PrefixV4)...)
}
if cfg.PrefixV6 != nil {
magicDNSDomains = append(
magicDNSDomains,
util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
magicDNSDomains = append(magicDNSDomains, util.GenerateIPv6DNSRootDomain(*cfg.PrefixV6)...)
}
// we might have routes already from Split DNS
@@ -207,14 +220,6 @@ func NewHeadscale(cfg *types.Config) (*Headscale, error) {
)
}
if cfg.DERP.ServerVerifyClients {
t := http.DefaultTransport.(*http.Transport) //nolint:forcetypeassert
t.RegisterProtocol(
derpServer.DerpVerifyScheme,
derpServer.NewDERPVerifyTransport(app.handleVerifyRequest),
)
}
embeddedDERPServer, err := derpServer.NewDERPServer(
cfg.ServerURL,
key.NodePrivate(*derpServerKey),
@@ -265,7 +270,14 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
var update types.StateUpdate
var changed bool
lastExpiryCheck, update, changed = h.state.ExpireExpiredNodes(lastExpiryCheck)
if err := h.db.Write(func(tx *gorm.DB) error {
lastExpiryCheck, update, changed = db.ExpireExpiredNodes(tx, lastExpiryCheck)
return nil
}); err != nil {
log.Error().Err(err).Msg("database error while expiring nodes")
continue
}
if changed {
log.Trace().Interface("nodes", update.ChangePatches).Msgf("expiring nodes")
@@ -276,16 +288,16 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
case <-derpTickerChan:
log.Info().Msg("Fetching DERPMap updates")
derpMap := derp.GetDERPMap(h.cfg.DERP)
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
if h.cfg.DERP.ServerEnabled && h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
region, _ := h.DERPServer.GenerateRegion()
derpMap.Regions[region.RegionID] = &region
h.DERPMap.Regions[region.RegionID] = &region
}
ctx := types.NotifyCtx(context.Background(), "derpmap-update", "na")
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StateDERPUpdated,
DERPMap: derpMap,
DERPMap: h.DERPMap,
})
case records, ok := <-extraRecordsUpdate:
@@ -295,9 +307,11 @@ func (h *Headscale) scheduledTasks(ctx context.Context) {
h.cfg.TailcfgDNSConfig.ExtraRecords = records
ctx := types.NotifyCtx(context.Background(), "dns-extrarecord", "all")
// TODO(kradalby): We can probably do better than sending a full update here,
// but for now this will ensure that all of the nodes get the new records.
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
// TODO(kradalby): We can probably do better than sending a full update here,
// but for now this will ensure that all of the nodes get the new records.
Type: types.StateFullUpdate,
})
}
}
}
@@ -344,7 +358,7 @@ func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
)
}
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(token, AuthPrefix))
if err != nil {
return ctx, status.Error(codes.Internal, "failed to validate token")
}
@@ -389,7 +403,7 @@ func (h *Headscale) httpAuthenticationMiddleware(next http.Handler) http.Handler
return
}
valid, err := h.state.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
valid, err := h.db.ValidateAPIKey(strings.TrimPrefix(authHeader, AuthPrefix))
if err != nil {
log.Error().
Caller().
@@ -445,13 +459,11 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
router := mux.NewRouter()
router.Use(prometheusMiddleware)
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).
Methods(http.MethodPost, http.MethodGet)
router.HandleFunc(ts2021UpgradePath, h.NoiseUpgradeHandler).Methods(http.MethodPost, http.MethodGet)
router.HandleFunc("/health", h.HealthHandler).Methods(http.MethodGet)
router.HandleFunc("/key", h.KeyHandler).Methods(http.MethodGet)
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).
Methods(http.MethodGet)
router.HandleFunc("/register/{registration_id}", h.authProvider.RegisterHandler).Methods(http.MethodGet)
if provider, ok := h.authProvider.(*AuthProviderOIDC); ok {
router.HandleFunc("/oidc/callback", provider.OIDCCallbackHandler).Methods(http.MethodGet)
@@ -472,7 +484,7 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
router.HandleFunc("/derp", h.DERPServer.DERPHandler)
router.HandleFunc("/derp/probe", derpServer.DERPProbeHandler)
router.HandleFunc("/derp/latency-check", derpServer.DERPProbeHandler)
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.state.DERPMap()))
router.HandleFunc("/bootstrap-dns", derpServer.DERPBootstrapDNSHandler(h.DERPMap))
}
apiRouter := router.PathPrefix("/api").Subrouter()
@@ -484,57 +496,54 @@ func (h *Headscale) createRouter(grpcMux *grpcRuntime.ServeMux) *mux.Router {
return router
}
// // TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
// // Maybe we should attempt a new in memory state and not go via the DB?
// // Maybe this should be implemented as an event bus?
// // A bool is returned indicating if a full update was sent to all nodes
// func usersChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) error {
// users, err := db.ListUsers()
// if err != nil {
// return err
// }
// TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
// Maybe we should attempt a new in memory state and not go via the DB?
func usersChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) error {
users, err := db.ListUsers()
if err != nil {
return err
}
// changed, err := polMan.SetUsers(users)
// if err != nil {
// return err
// }
changed, err := polMan.SetUsers(users)
if err != nil {
return err
}
// if changed {
// ctx := types.NotifyCtx(context.Background(), "acl-users-change", "all")
// notif.NotifyAll(ctx, types.UpdateFull())
// }
if changed {
ctx := types.NotifyCtx(context.Background(), "acl-users-change", "all")
notif.NotifyAll(ctx, types.StateUpdate{
Type: types.StateFullUpdate,
})
}
// return nil
// }
return nil
}
// // TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
// // Maybe we should attempt a new in memory state and not go via the DB?
// // Maybe this should be implemented as an event bus?
// // A bool is returned indicating if a full update was sent to all nodes
// func nodesChangedHook(
// db *db.HSDatabase,
// polMan policy.PolicyManager,
// notif *notifier.Notifier,
// ) (bool, error) {
// nodes, err := db.ListNodes()
// if err != nil {
// return false, err
// }
// TODO(kradalby): Do a variant of this, and polman which only updates the node that has changed.
// Maybe we should attempt a new in memory state and not go via the DB?
// A bool is returned indicating if a full update was sent to all nodes
func nodesChangedHook(db *db.HSDatabase, polMan policy.PolicyManager, notif *notifier.Notifier) (bool, error) {
nodes, err := db.ListNodes()
if err != nil {
return false, err
}
// filterChanged, err := polMan.SetNodes(nodes)
// if err != nil {
// return false, err
// }
filterChanged, err := polMan.SetNodes(nodes)
if err != nil {
return false, err
}
// if filterChanged {
// ctx := types.NotifyCtx(context.Background(), "acl-nodes-change", "all")
// notif.NotifyAll(ctx, types.UpdateFull())
if filterChanged {
ctx := types.NotifyCtx(context.Background(), "acl-nodes-change", "all")
notif.NotifyAll(ctx, types.StateUpdate{
Type: types.StateFullUpdate,
})
// return true, nil
// }
return true, nil
}
// return false, nil
// }
return false, nil
}
// Serve launches the HTTP and gRPC server service Headscale and the API.
func (h *Headscale) Serve() error {
@@ -557,15 +566,15 @@ func (h *Headscale) Serve() error {
spew.Dump(h.cfg)
}
log.Info().Str("version", types.Version).Str("commit", types.GitCommitHash).Msg("Starting Headscale")
log.Info().
Caller().
Str("minimum_version", capver.TailscaleVersion(capver.MinSupportedCapabilityVersion)).
Msg("Clients with a lower minimum version will be rejected")
// Fetch an initial DERP Map before we start serving
h.mapper = mapper.NewMapper(h.state, h.cfg, h.nodeNotifier)
h.DERPMap = derp.GetDERPMap(h.cfg.DERP)
h.mapper = mapper.NewMapper(h.db, h.cfg, h.DERPMap, h.nodeNotifier, h.polMan)
// TODO(kradalby): fix state part.
if h.cfg.DERP.ServerEnabled {
// When embedded DERP is enabled we always need a STUN server
if h.cfg.DERP.STUNAddr == "" {
@@ -578,13 +587,13 @@ func (h *Headscale) Serve() error {
}
if h.cfg.DERP.AutomaticallyAddEmbeddedDerpRegion {
h.state.DERPMap().Regions[region.RegionID] = &region
h.DERPMap.Regions[region.RegionID] = &region
}
go h.DERPServer.ServeSTUN()
}
if len(h.state.DERPMap().Regions) == 0 {
if len(h.DERPMap.Regions) == 0 {
return errEmptyInitialDERPMap
}
@@ -593,7 +602,7 @@ func (h *Headscale) Serve() error {
// around between restarts, they will reconnect and the GC will
// be cancelled.
go h.ephemeralGC.Start()
ephmNodes, err := h.state.ListEphemeralNodes()
ephmNodes, err := h.db.ListEphemeralNodes()
if err != nil {
return fmt.Errorf("failed to list ephemeral nodes: %w", err)
}
@@ -716,10 +725,12 @@ func (h *Headscale) Serve() error {
log.Info().Msgf("Enabling remote gRPC at %s", h.cfg.GRPCAddr)
grpcOptions := []grpc.ServerOption{
grpc.ChainUnaryInterceptor(
h.grpcAuthenticationInterceptor,
// Uncomment to debug grpc communication.
// zerolog.NewUnaryServerInterceptor(),
grpc.UnaryInterceptor(
grpcMiddleware.ChainUnaryServer(
h.grpcAuthenticationInterceptor,
// Uncomment to debug grpc communication.
// zerolog.NewUnaryServerInterceptor(),
),
),
}
@@ -781,12 +792,26 @@ func (h *Headscale) Serve() error {
log.Info().
Msgf("listening and serving HTTP on: %s", h.cfg.Addr)
debugMux := http.NewServeMux()
debugMux.Handle("/debug/pprof/", http.DefaultServeMux)
debugMux.HandleFunc("/debug/notifier", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(h.nodeNotifier.String()))
})
debugMux.Handle("/metrics", promhttp.Handler())
debugHTTPServer := &http.Server{
Addr: h.cfg.MetricsAddr,
Handler: debugMux,
ReadTimeout: types.HTTPTimeout,
WriteTimeout: 0,
}
debugHTTPListener, err := net.Listen("tcp", h.cfg.MetricsAddr)
if err != nil {
return fmt.Errorf("failed to bind to TCP address: %w", err)
}
debugHTTPServer := h.debugHTTPServer()
errorGroup.Go(func() error { return debugHTTPServer.Serve(debugHTTPListener) })
log.Info().
@@ -828,10 +853,18 @@ func (h *Headscale) Serve() error {
continue
}
changed, err := h.state.ReloadPolicy()
if err := h.loadPolicyManager(); err != nil {
log.Error().Err(err).Msg("failed to reload Policy")
}
pol, err := h.policyBytes()
if err != nil {
log.Error().Err(err).Msgf("reloading policy")
continue
log.Error().Err(err).Msg("failed to get policy blob")
}
changed, err := h.polMan.SetPolicy(pol)
if err != nil {
log.Error().Err(err).Msg("failed to set new policy")
}
if changed {
@@ -839,7 +872,9 @@ func (h *Headscale) Serve() error {
Msg("ACL policy successfully reloaded, notifying nodes of change")
ctx := types.NotifyCtx(context.Background(), "acl-sighup", "na")
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StateFullUpdate,
})
}
default:
info := func(msg string) { log.Info().Msg(msg) }
@@ -896,7 +931,7 @@ func (h *Headscale) Serve() error {
// Close db connections
info("closing database connection")
err = h.state.Close()
err = h.db.Close()
if err != nil {
log.Error().Err(err).Msg("failed to close db")
}
@@ -996,10 +1031,13 @@ func notFoundHandler(
writer http.ResponseWriter,
req *http.Request,
) {
body, _ := io.ReadAll(req.Body)
log.Trace().
Interface("header", req.Header).
Interface("proto", req.Proto).
Interface("url", req.URL).
Bytes("body", body).
Msg("Request did not match")
writer.WriteHeader(http.StatusNotFound)
}
@@ -1047,3 +1085,90 @@ func readOrCreatePrivateKey(path string) (*key.MachinePrivate, error) {
return &machineKey, nil
}
// policyBytes returns the appropriate policy for the
// current configuration as a []byte array.
func (h *Headscale) policyBytes() ([]byte, error) {
switch h.cfg.Policy.Mode {
case types.PolicyModeFile:
path := h.cfg.Policy.Path
// It is fine to start headscale without a policy file.
if len(path) == 0 {
return nil, nil
}
absPath := util.AbsolutePathFromConfigPath(path)
policyFile, err := os.Open(absPath)
if err != nil {
return nil, err
}
defer policyFile.Close()
return io.ReadAll(policyFile)
case types.PolicyModeDB:
p, err := h.db.GetPolicy()
if err != nil {
if errors.Is(err, types.ErrPolicyNotFound) {
return nil, nil
}
return nil, err
}
if p.Data == "" {
return nil, nil
}
return []byte(p.Data), err
}
return nil, fmt.Errorf("unsupported policy mode: %s", h.cfg.Policy.Mode)
}
func (h *Headscale) loadPolicyManager() error {
var errOut error
h.polManOnce.Do(func() {
// Validate and reject configuration that would error when applied
// while creating a map response. This requires nodes, so there is still
// a scenario where an invalid policy might be allowed if the server has
// no nodes yet, but it should help for the general case and for hot
// reloading configurations.
// Note that this check is only done for file-based policies in this function,
// as database-based policies are validated in the gRPC API, where an invalid
// policy is not allowed to be written to the database.
nodes, err := h.db.ListNodes()
if err != nil {
errOut = fmt.Errorf("loading nodes from database to validate policy: %w", err)
return
}
users, err := h.db.ListUsers()
if err != nil {
errOut = fmt.Errorf("loading users from database to validate policy: %w", err)
return
}
pol, err := h.policyBytes()
if err != nil {
errOut = fmt.Errorf("loading policy bytes: %w", err)
return
}
h.polMan, err = policy.NewPolicyManager(pol, users, nodes)
if err != nil {
errOut = fmt.Errorf("creating policy manager: %w", err)
return
}
if len(nodes) > 0 {
_, err = h.polMan.SSHPolicy(nodes[0])
if err != nil {
errOut = fmt.Errorf("verifying SSH rules: %w", err)
return
}
}
})
return errOut
}

View File

@@ -9,7 +9,9 @@ import (
"strings"
"time"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"gorm.io/gorm"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
@@ -26,7 +28,7 @@ func (h *Headscale) handleRegister(
regReq tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
node, err := h.state.GetNodeByNodeKey(regReq.NodeKey)
node, err := h.db.GetNodeByNodeKey(regReq.NodeKey)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return nil, fmt.Errorf("looking up node in database: %w", err)
}
@@ -41,7 +43,10 @@ func (h *Headscale) handleRegister(
}
if regReq.Followup != "" {
return h.waitForFollowup(ctx, regReq)
// TODO(kradalby): Does this need to return an error of some sort?
// Maybe if the registration fails down the line it can be sent
// on the channel and returned here?
h.waitForFollowup(ctx, regReq)
}
if regReq.Auth != nil && regReq.Auth.AuthKey != "" {
@@ -82,137 +87,176 @@ func (h *Headscale) handleExistingNode(
// If the request expiry is in the past, we consider it a logout.
if requestExpiry.Before(time.Now()) {
if node.IsEphemeral() {
policyChanged, err := h.state.DeleteNode(node)
changedNodes, err := h.db.DeleteNode(node, h.nodeNotifier.LikelyConnectedMap())
if err != nil {
return nil, fmt.Errorf("deleting ephemeral node: %w", err)
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "auth-logout-ephemeral-policy", "na")
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
} else {
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
h.nodeNotifier.NotifyAll(ctx, types.UpdatePeerRemoved(node.ID))
ctx := types.NotifyCtx(context.Background(), "logout-ephemeral", "na")
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerRemoved,
Removed: []types.NodeID{node.ID},
})
if changedNodes != nil {
h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: changedNodes,
})
}
return nil, nil
}
expired = true
}
n, policyChanged, err := h.state.SetNodeExpiry(node.ID, requestExpiry)
err := h.db.NodeSetExpiry(node.ID, requestExpiry)
if err != nil {
return nil, fmt.Errorf("setting node expiry: %w", err)
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "auth-expiry-policy", "na")
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
} else {
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdateExpire(node.ID, requestExpiry), node.ID)
}
return nodeToRegisterResponse(n), nil
ctx := types.NotifyCtx(context.Background(), "logout-expiry", "na")
h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, requestExpiry), node.ID)
}
return nodeToRegisterResponse(node), nil
}
func nodeToRegisterResponse(node *types.Node) *tailcfg.RegisterResponse {
return &tailcfg.RegisterResponse{
// TODO(kradalby): Only send for user-owned nodes
// and not tagged nodes once the tags feature is working.
User: *node.User.TailscaleUser(),
Login: *node.User.TailscaleLogin(),
NodeKeyExpired: node.IsExpired(),
NodeKeyExpired: expired,
// Headscale does not implement the concept of machine authorization
// so we always return true here.
// Revisit this if #2176 gets implemented.
MachineAuthorized: true,
}
}, nil
}
func (h *Headscale) waitForFollowup(
ctx context.Context,
regReq tailcfg.RegisterRequest,
) (*tailcfg.RegisterResponse, error) {
) {
fu, err := url.Parse(regReq.Followup)
if err != nil {
return nil, NewHTTPError(http.StatusUnauthorized, "invalid followup URL", err)
return
}
followupReg, err := types.RegistrationIDFromString(strings.ReplaceAll(fu.Path, "/register/", ""))
if err != nil {
return nil, NewHTTPError(http.StatusUnauthorized, "invalid registration ID", err)
return
}
if reg, ok := h.state.GetRegistrationCacheEntry(followupReg); ok {
if reg, ok := h.registrationCache.Get(followupReg); ok {
select {
case <-ctx.Done():
return nil, NewHTTPError(http.StatusUnauthorized, "registration timed out", err)
case node := <-reg.Registered:
if node == nil {
return nil, NewHTTPError(http.StatusUnauthorized, "node not found", nil)
}
return nodeToRegisterResponse(node), nil
return
case <-reg.Registered:
return
}
}
}
return nil, NewHTTPError(http.StatusNotFound, "followup registration not found", nil)
// canUsePreAuthKey checks if a pre auth key can be used.
func canUsePreAuthKey(pak *types.PreAuthKey) error {
if pak == nil {
return NewHTTPError(http.StatusUnauthorized, "invalid authkey", nil)
}
if pak.Expiration != nil && pak.Expiration.Before(time.Now()) {
return NewHTTPError(http.StatusUnauthorized, "authkey expired", nil)
}
// we don't need to check if it has been used before
if pak.Reusable {
return nil
}
if pak.Used {
return NewHTTPError(http.StatusUnauthorized, "authkey already used", nil)
}
return nil
}
func (h *Headscale) handleRegisterWithAuthKey(
regReq tailcfg.RegisterRequest,
machineKey key.MachinePublic,
) (*tailcfg.RegisterResponse, error) {
node, changed, err := h.state.HandleNodeFromPreAuthKey(
regReq,
machineKey,
)
pak, err := h.db.GetPreAuthKey(regReq.Auth.AuthKey)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, NewHTTPError(http.StatusUnauthorized, "invalid pre auth key", nil)
}
if perr, ok := err.(types.PAKError); ok {
return nil, NewHTTPError(http.StatusUnauthorized, perr.Error(), nil)
}
return nil, err
}
// This is a bit of a back and forth, but we have a chicken-and-egg
// dependency here.
// Because of the way the policy manager works, we need to have the node
// in the database, then add it to the policy manager, and only then can we
// approve the route. This means we get this dance where the node is
// first added to the database, then added to the policy manager via
// nodesChangedHook, and then we can auto-approve the routes.
// As that only approves the struct object, we need to save it again and
// ensure we send an update.
// This works, but might be another good candidate for some sort of
// event bus.
routesChanged := h.state.AutoApproveRoutes(node)
if _, _, err := h.state.SaveNode(node); err != nil {
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
err = canUsePreAuthKey(pak)
if err != nil {
return nil, err
}
if routesChanged {
nodeToRegister := types.Node{
Hostname: regReq.Hostinfo.Hostname,
UserID: pak.User.ID,
User: pak.User,
MachineKey: machineKey,
NodeKey: regReq.NodeKey,
Hostinfo: regReq.Hostinfo,
LastSeen: ptr.To(time.Now()),
RegisterMethod: util.RegisterMethodAuthKey,
// TODO(kradalby): This should not be set on the node,
// they should be looked up through the key, which is
// attached to the node.
ForcedTags: pak.Proto().GetAclTags(),
AuthKey: pak,
AuthKeyID: &pak.ID,
}
if !regReq.Expiry.IsZero() {
nodeToRegister.Expiry = &regReq.Expiry
}
ipv4, ipv6, err := h.ipAlloc.Next()
if err != nil {
return nil, fmt.Errorf("allocating IPs: %w", err)
}
node, err := db.Write(h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
node, err := db.RegisterNode(tx,
nodeToRegister,
ipv4, ipv6,
)
if err != nil {
return nil, fmt.Errorf("registering node: %w", err)
}
if !pak.Reusable {
err = db.UsePreAuthKey(tx, pak)
if err != nil {
return nil, fmt.Errorf("using pre auth key: %w", err)
}
}
return node, nil
})
if err != nil {
return nil, err
}
updateSent, err := nodesChangedHook(h.db, h.polMan, h.nodeNotifier)
if err != nil {
return nil, fmt.Errorf("nodes changed hook: %w", err)
}
if !updateSent {
ctx := types.NotifyCtx(context.Background(), "node updated", node.Hostname)
h.nodeNotifier.NotifyAll(ctx, types.UpdatePeerChanged(node.ID))
} else if changed {
ctx := types.NotifyCtx(context.Background(), "node created", node.Hostname)
h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
h.nodeNotifier.NotifyAll(ctx, types.StateUpdatePeerAdded(node.ID))
}
return &tailcfg.RegisterResponse{
MachineAuthorized: true,
NodeKeyExpired: node.IsExpired(),
User: *node.User.TailscaleUser(),
Login: *node.User.TailscaleLogin(),
User: *pak.User.TailscaleUser(),
Login: *pak.User.TailscaleLogin(),
}, nil
}
@@ -225,7 +269,7 @@ func (h *Headscale) handleRegisterInteractive(
return nil, fmt.Errorf("generating registration ID: %w", err)
}
nodeToRegister := types.RegisterNode{
newNode := types.RegisterNode{
Node: types.Node{
Hostname: regReq.Hostinfo.Hostname,
MachineKey: machineKey,
@@ -233,16 +277,16 @@ func (h *Headscale) handleRegisterInteractive(
Hostinfo: regReq.Hostinfo,
LastSeen: ptr.To(time.Now()),
},
Registered: make(chan *types.Node),
Registered: make(chan struct{}),
}
if !regReq.Expiry.IsZero() {
nodeToRegister.Node.Expiry = &regReq.Expiry
newNode.Node.Expiry = &regReq.Expiry
}
h.state.SetRegistrationCacheEntry(
h.registrationCache.Set(
registrationId,
nodeToRegister,
newNode,
)
return &tailcfg.RegisterResponse{

View File

@@ -1,10 +1,12 @@
package types
package hscontrol
import (
"net/http"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/types"
)
func TestCanUsePreAuthKey(t *testing.T) {
@@ -14,13 +16,13 @@ func TestCanUsePreAuthKey(t *testing.T) {
tests := []struct {
name string
pak *PreAuthKey
pak *types.PreAuthKey
wantErr bool
err PAKError
err HTTPError
}{
{
name: "valid reusable key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: true,
Used: false,
Expiration: &future,
@@ -29,7 +31,7 @@ func TestCanUsePreAuthKey(t *testing.T) {
},
{
name: "valid non-reusable key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: false,
Used: false,
Expiration: &future,
@@ -38,27 +40,27 @@ func TestCanUsePreAuthKey(t *testing.T) {
},
{
name: "expired key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: false,
Used: false,
Expiration: &past,
},
wantErr: true,
err: PAKError("authkey expired"),
err: NewHTTPError(http.StatusUnauthorized, "authkey expired", nil),
},
{
name: "used non-reusable key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: false,
Used: true,
Expiration: &future,
},
wantErr: true,
err: PAKError("authkey already used"),
err: NewHTTPError(http.StatusUnauthorized, "authkey already used", nil),
},
{
name: "used reusable key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: true,
Used: true,
Expiration: &future,
@@ -67,7 +69,7 @@ func TestCanUsePreAuthKey(t *testing.T) {
},
{
name: "no expiration date",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: false,
Used: false,
Expiration: nil,
@@ -78,38 +80,38 @@ func TestCanUsePreAuthKey(t *testing.T) {
name: "nil preauth key",
pak: nil,
wantErr: true,
err: PAKError("invalid authkey"),
err: NewHTTPError(http.StatusUnauthorized, "invalid authkey", nil),
},
{
name: "expired and used key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: false,
Used: true,
Expiration: &past,
},
wantErr: true,
err: PAKError("authkey expired"),
err: NewHTTPError(http.StatusUnauthorized, "authkey expired", nil),
},
{
name: "no expiration and used key",
pak: &PreAuthKey{
pak: &types.PreAuthKey{
Reusable: false,
Used: true,
Expiration: nil,
},
wantErr: true,
err: PAKError("authkey already used"),
err: NewHTTPError(http.StatusUnauthorized, "authkey already used", nil),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.pak.Validate()
err := canUsePreAuthKey(tt.pak)
if tt.wantErr {
if err == nil {
t.Errorf("expected error but got none")
} else {
httpErr, ok := err.(PAKError)
httpErr, ok := err.(HTTPError)
if !ok {
t.Errorf("expected HTTPError but got %T", err)
} else {

View File

@@ -4,8 +4,6 @@ import (
"sort"
"strings"
"slices"
xmaps "golang.org/x/exp/maps"
"tailscale.com/tailcfg"
"tailscale.com/util/set"
@@ -33,7 +31,9 @@ func tailscaleVersSorted() []string {
func capVersSorted() []tailcfg.CapabilityVersion {
capVers := xmaps.Keys(capVerToTailscaleVer)
slices.Sort(capVers)
sort.Slice(capVers, func(i, j int) bool {
return capVers[i] < capVers[j]
})
return capVers
}

View File

@@ -5,6 +5,11 @@ package capver
import "tailscale.com/tailcfg"
var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
"v1.44.3": 63,
"v1.56.1": 82,
"v1.58.0": 85,
"v1.58.1": 85,
"v1.58.2": 85,
"v1.60.0": 87,
"v1.60.1": 87,
"v1.62.0": 88,
@@ -31,15 +36,13 @@ var tailscaleToCapVer = map[string]tailcfg.CapabilityVersion{
"v1.78.0": 109,
"v1.78.1": 109,
"v1.80.0": 113,
"v1.80.1": 113,
"v1.80.2": 113,
"v1.80.3": 113,
"v1.82.0": 115,
"v1.82.5": 115,
}
var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
63: "v1.44.3",
82: "v1.56.1",
85: "v1.58.0",
87: "v1.60.0",
88: "v1.62.0",
90: "v1.64.0",
@@ -50,5 +53,4 @@ var capVerToTailscaleVer = map[tailcfg.CapabilityVersion]string{
106: "v1.74.0",
109: "v1.78.0",
113: "v1.80.0",
115: "v1.82.0",
}

View File

@@ -13,10 +13,11 @@ func TestTailscaleLatestMajorMinor(t *testing.T) {
stripV bool
expected []string
}{
{3, false, []string{"v1.78", "v1.80", "v1.82"}},
{2, true, []string{"1.80", "1.82"}},
{3, false, []string{"v1.76", "v1.78", "v1.80"}},
{2, true, []string{"1.78", "1.80"}},
// Lazy way to see all supported versions
{10, true, []string{
"1.62",
"1.64",
"1.66",
"1.68",
@@ -26,7 +27,6 @@ func TestTailscaleLatestMajorMinor(t *testing.T) {
"1.76",
"1.78",
"1.80",
"1.82",
}},
{0, false, nil},
}
@@ -46,7 +46,7 @@ func TestCapVerMinimumTailscaleVersion(t *testing.T) {
input tailcfg.CapabilityVersion
expected string
}{
{88, "v1.62.0"},
{85, "v1.58.0"},
{90, "v1.64.0"},
{95, "v1.66.0"},
{106, "v1.74.0"},

View File

@@ -8,7 +8,6 @@ import (
"fmt"
"net/netip"
"path/filepath"
"slices"
"strconv"
"strings"
"time"
@@ -22,7 +21,6 @@ import (
"gorm.io/gorm"
"gorm.io/gorm/logger"
"gorm.io/gorm/schema"
"tailscale.com/net/tsaddr"
"tailscale.com/util/set"
"zgo.at/zcache/v2"
)
@@ -624,100 +622,6 @@ AND auth_key_id NOT IN (
},
Rollback: func(db *gorm.DB) error { return nil },
},
// Migrate all routes from the Route table to the new field ApprovedRoutes
// in the Node table. Then drop the Route table.
{
ID: "202502131714",
Migrate: func(tx *gorm.DB) error {
if !tx.Migrator().HasColumn(&types.Node{}, "approved_routes") {
err := tx.Migrator().AddColumn(&types.Node{}, "approved_routes")
if err != nil {
return fmt.Errorf("adding column types.Node: %w", err)
}
}
nodeRoutes := map[uint64][]netip.Prefix{}
var routes []types.Route
err = tx.Find(&routes).Error
if err != nil {
return fmt.Errorf("fetching routes: %w", err)
}
for _, route := range routes {
if route.Enabled {
nodeRoutes[route.NodeID] = append(nodeRoutes[route.NodeID], route.Prefix)
}
}
for nodeID, routes := range nodeRoutes {
tsaddr.SortPrefixes(routes)
routes = slices.Compact(routes)
data, err := json.Marshal(routes)
err = tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", data).Error
if err != nil {
return fmt.Errorf("saving approved routes to new column: %w", err)
}
}
// Drop the old table.
_ = tx.Migrator().DropTable(&types.Route{})
return nil
},
Rollback: func(db *gorm.DB) error { return nil },
},
{
ID: "202502171819",
Migrate: func(tx *gorm.DB) error {
// This migration originally removed the last_seen column
// from the node table, but it was added back in
// 202505091439.
return nil
},
Rollback: func(db *gorm.DB) error { return nil },
},
// Add back last_seen column to node table.
{
ID: "202505091439",
Migrate: func(tx *gorm.DB) error {
// Add back last_seen column to node table if it does not exist.
// This is a workaround for the fact that the last_seen column
// was removed in the 202502171819 migration, but only for some
// beta testers.
if !tx.Migrator().HasColumn(&types.Node{}, "last_seen") {
_ = tx.Migrator().AddColumn(&types.Node{}, "last_seen")
}
return nil
},
Rollback: func(db *gorm.DB) error { return nil },
},
// Fix the provider identifier for users that have a double slash in the
// provider identifier.
{
ID: "202505141324",
Migrate: func(tx *gorm.DB) error {
users, err := ListUsers(tx)
if err != nil {
return fmt.Errorf("listing users: %w", err)
}
for _, user := range users {
user.ProviderIdentifier.String = types.CleanIdentifier(user.ProviderIdentifier.String)
err := tx.Save(user).Error
if err != nil {
return fmt.Errorf("saving user: %w", err)
}
}
return nil
},
Rollback: func(db *gorm.DB) error { return nil },
},
},
)

View File

@@ -48,43 +48,25 @@ func TestMigrationsSQLite(t *testing.T) {
{
dbPath: "testdata/0-22-3-to-0-23-0-routes-are-dropped-2063.sqlite",
wantFunc: func(t *testing.T, h *HSDatabase) {
nodes, err := Read(h.DB, func(rx *gorm.DB) (types.Nodes, error) {
n1, err := GetNodeByID(rx, 1)
n26, err := GetNodeByID(rx, 26)
n31, err := GetNodeByID(rx, 31)
n32, err := GetNodeByID(rx, 32)
if err != nil {
return nil, err
}
return types.Nodes{n1, n26, n31, n32}, nil
routes, err := Read(h.DB, func(rx *gorm.DB) (types.Routes, error) {
return GetRoutes(rx)
})
require.NoError(t, err)
// want := types.Routes{
// r(1, "0.0.0.0/0", true, false),
// r(1, "::/0", true, false),
// r(1, "10.9.110.0/24", true, true),
// r(26, "172.100.100.0/24", true, true),
// r(26, "172.100.100.0/24", true, false, false),
// r(31, "0.0.0.0/0", true, false),
// r(31, "0.0.0.0/0", true, false, false),
// r(31, "::/0", true, false),
// r(31, "::/0", true, false, false),
// r(32, "192.168.0.24/32", true, true),
// }
want := [][]netip.Prefix{
{ipp("0.0.0.0/0"), ipp("10.9.110.0/24"), ipp("::/0")},
{ipp("172.100.100.0/24")},
{ipp("0.0.0.0/0"), ipp("::/0")},
{ipp("192.168.0.24/32")},
assert.Len(t, routes, 10)
want := types.Routes{
r(1, "0.0.0.0/0", true, true, false),
r(1, "::/0", true, true, false),
r(1, "10.9.110.0/24", true, true, true),
r(26, "172.100.100.0/24", true, true, true),
r(26, "172.100.100.0/24", true, false, false),
r(31, "0.0.0.0/0", true, true, false),
r(31, "0.0.0.0/0", true, false, false),
r(31, "::/0", true, true, false),
r(31, "::/0", true, false, false),
r(32, "192.168.0.24/32", true, true, true),
}
var got [][]netip.Prefix
for _, node := range nodes {
got = append(got, node.ApprovedRoutes)
}
if diff := cmp.Diff(want, got, util.PrefixComparer); diff != "" {
if diff := cmp.Diff(want, routes, cmpopts.IgnoreFields(types.Route{}, "Model", "Node"), util.PrefixComparer); diff != "" {
t.Errorf("TestMigrations() mismatch (-want +got):\n%s", diff)
}
},
@@ -92,13 +74,13 @@ func TestMigrationsSQLite(t *testing.T) {
{
dbPath: "testdata/0-22-3-to-0-23-0-routes-fail-foreign-key-2076.sqlite",
wantFunc: func(t *testing.T, h *HSDatabase) {
node, err := Read(h.DB, func(rx *gorm.DB) (*types.Node, error) {
return GetNodeByID(rx, 13)
routes, err := Read(h.DB, func(rx *gorm.DB) (types.Routes, error) {
return GetRoutes(rx)
})
require.NoError(t, err)
assert.Len(t, node.ApprovedRoutes, 3)
_ = types.Routes{
assert.Len(t, routes, 4)
want := types.Routes{
// These routes exist, but have no nodes associated with them
// when the migration starts.
// r(1, "0.0.0.0/0", true, true, false),
@@ -129,8 +111,7 @@ func TestMigrationsSQLite(t *testing.T) {
r(13, "::/0", true, true, false),
r(13, "10.18.80.2/32", true, true, true),
}
want := []netip.Prefix{ipp("0.0.0.0/0"), ipp("10.18.80.2/32"), ipp("::/0")}
if diff := cmp.Diff(want, node.ApprovedRoutes, util.PrefixComparer); diff != "" {
if diff := cmp.Diff(want, routes, cmpopts.IgnoreFields(types.Route{}, "Model", "Node"), util.PrefixComparer); diff != "" {
t.Errorf("TestMigrations() mismatch (-want +got):\n%s", diff)
}
},
@@ -244,7 +225,7 @@ func TestMigrationsSQLite(t *testing.T) {
for _, tt := range tests {
t.Run(tt.dbPath, func(t *testing.T) {
dbPath, err := testCopyOfDatabase(t, tt.dbPath)
dbPath, err := testCopyOfDatabase(tt.dbPath)
if err != nil {
t.Fatalf("copying db for test: %s", err)
}
@@ -266,7 +247,7 @@ func TestMigrationsSQLite(t *testing.T) {
}
}
func testCopyOfDatabase(t *testing.T, src string) (string, error) {
func testCopyOfDatabase(src string) (string, error) {
sourceFileStat, err := os.Stat(src)
if err != nil {
return "", err
@@ -282,7 +263,11 @@ func testCopyOfDatabase(t *testing.T, src string) (string, error) {
}
defer source.Close()
tmpDir := t.TempDir()
tmpDir, err := os.MkdirTemp("", "hsdb-test-*")
if err != nil {
return "", err
}
fn := filepath.Base(src)
dst := filepath.Join(tmpDir, fn)
@@ -469,27 +454,3 @@ func TestMigrationsPostgres(t *testing.T) {
})
}
}
func dbForTest(t *testing.T) *HSDatabase {
t.Helper()
dbPath := t.TempDir() + "/headscale_test.db"
db, err := NewHeadscaleDatabase(
types.DatabaseConfig{
Type: "sqlite3",
Sqlite: types.SqliteConfig{
Path: dbPath,
},
},
"",
emptyCache(),
)
if err != nil {
t.Fatalf("setting up database: %s", err)
}
t.Logf("database set up at: %s", dbPath)
return db
}

View File

@@ -1,389 +0,0 @@
package db
import (
"math/rand"
"runtime"
"sync"
"testing"
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/assert"
)
const fiveHundredMillis = 500 * time.Millisecond
const oneHundredMillis = 100 * time.Millisecond
const fiftyMillis = 50 * time.Millisecond
// TestEphemeralGarbageCollectorGoRoutineLeak is a test for a goroutine leak in EphemeralGarbageCollector().
// It creates a new EphemeralGarbageCollector, schedules several nodes for deletion with a short expiry,
// and verifies that the nodes are deleted when the expiry time passes, and then
// checks for any leaked goroutines after the garbage collector is closed.
func TestEphemeralGarbageCollectorGoRoutineLeak(t *testing.T) {
// Count goroutines at the start
initialGoroutines := runtime.NumGoroutine()
t.Logf("Initial number of goroutines: %d", initialGoroutines)
// Basic deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
var deletionWg sync.WaitGroup
deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()
deletedIDs = append(deletedIDs, nodeID)
deleteMutex.Unlock()
deletionWg.Done()
}
// Start the GC
gc := NewEphemeralGarbageCollector(deleteFunc)
go gc.Start()
// Schedule several nodes for deletion with short expiry
const expiry = fiftyMillis
const numNodes = 100
// Set up wait group for expected deletions
deletionWg.Add(numNodes)
for i := 1; i <= numNodes; i++ {
gc.Schedule(types.NodeID(i), expiry)
}
// Wait for all scheduled deletions to complete
deletionWg.Wait()
// Check nodes are deleted
deleteMutex.Lock()
assert.Equal(t, numNodes, len(deletedIDs), "Not all nodes were deleted")
deleteMutex.Unlock()
// Schedule and immediately cancel to test that part of the code
for i := numNodes + 1; i <= numNodes*2; i++ {
nodeID := types.NodeID(i)
gc.Schedule(nodeID, time.Hour)
gc.Cancel(nodeID)
}
// Create a channel to signal when we're done with cleanup checks
cleanupDone := make(chan struct{})
// Close GC and check for leaks in a separate goroutine
go func() {
// Close GC
gc.Close()
// Give any potential leaked goroutines a chance to exit
// Still need a small sleep here as we're checking for absence of goroutines
time.Sleep(oneHundredMillis)
// Check for leaked goroutines
finalGoroutines := runtime.NumGoroutine()
t.Logf("Final number of goroutines: %d", finalGoroutines)
// NB: We have to allow for a small number of extra goroutines because of the test itself
assert.LessOrEqual(t, finalGoroutines, initialGoroutines+5,
"There are significantly more goroutines after GC usage, which suggests a leak")
close(cleanupDone)
}()
// Wait for cleanup to complete
<-cleanupDone
}
// TestEphemeralGarbageCollectorReschedule is a test for the rescheduling of nodes in EphemeralGarbageCollector().
// It creates a new EphemeralGarbageCollector, schedules a node for deletion with a longer expiry,
// and then reschedules it with a shorter expiry, and verifies that the node is deleted only once.
func TestEphemeralGarbageCollectorReschedule(t *testing.T) {
// Deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()
deletedIDs = append(deletedIDs, nodeID)
deleteMutex.Unlock()
}
// Start GC
gc := NewEphemeralGarbageCollector(deleteFunc)
go gc.Start()
defer gc.Close()
const shortExpiry = fiftyMillis
const longExpiry = 1 * time.Hour
nodeID := types.NodeID(1)
// Schedule node for deletion with long expiry
gc.Schedule(nodeID, longExpiry)
// Reschedule the same node with a shorter expiry
gc.Schedule(nodeID, shortExpiry)
// Wait for deletion
time.Sleep(shortExpiry * 2)
// Verify that the node was deleted once
deleteMutex.Lock()
assert.Equal(t, 1, len(deletedIDs), "Node should be deleted exactly once")
assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted")
deleteMutex.Unlock()
}
// TestEphemeralGarbageCollectorCancelAndReschedule is a test for the cancellation and rescheduling of nodes in EphemeralGarbageCollector().
// It creates a new EphemeralGarbageCollector, schedules a node for deletion, cancels it, and then reschedules it,
// and verifies that the node is deleted only once.
func TestEphemeralGarbageCollectorCancelAndReschedule(t *testing.T) {
// Deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
deletionNotifier := make(chan types.NodeID, 1)
deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()
deletedIDs = append(deletedIDs, nodeID)
deleteMutex.Unlock()
deletionNotifier <- nodeID
}
// Start the GC
gc := NewEphemeralGarbageCollector(deleteFunc)
go gc.Start()
defer gc.Close()
nodeID := types.NodeID(1)
const expiry = fiftyMillis
// Schedule node for deletion
gc.Schedule(nodeID, expiry)
// Cancel the scheduled deletion
gc.Cancel(nodeID)
// Use a timeout to verify no deletion occurred
select {
case <-deletionNotifier:
t.Fatal("Node was deleted after cancellation")
case <-time.After(expiry * 2): // Still need a timeout for negative test
// This is expected - no deletion should occur
}
deleteMutex.Lock()
assert.Equal(t, 0, len(deletedIDs), "Node should not be deleted after cancellation")
deleteMutex.Unlock()
// Reschedule the node
gc.Schedule(nodeID, expiry)
// Wait for deletion with timeout
select {
case deletedNodeID := <-deletionNotifier:
// Verify the correct node was deleted
assert.Equal(t, nodeID, deletedNodeID, "The correct node should be deleted")
case <-time.After(time.Second): // Longer timeout as a safety net
t.Fatal("Timed out waiting for node deletion")
}
// Verify final state
deleteMutex.Lock()
assert.Equal(t, 1, len(deletedIDs), "Node should be deleted after rescheduling")
assert.Equal(t, nodeID, deletedIDs[0], "The correct node should be deleted")
deleteMutex.Unlock()
}
// TestEphemeralGarbageCollectorCloseBeforeTimerFires is a test for the closing of the EphemeralGarbageCollector before the timer fires.
// It creates a new EphemeralGarbageCollector, schedules a node for deletion, closes the GC, and verifies that the node is not deleted.
func TestEphemeralGarbageCollectorCloseBeforeTimerFires(t *testing.T) {
// Deletion tracking
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()
deletedIDs = append(deletedIDs, nodeID)
deleteMutex.Unlock()
}
// Start the GC
gc := NewEphemeralGarbageCollector(deleteFunc)
go gc.Start()
const longExpiry = 1 * time.Hour
const shortExpiry = fiftyMillis
// Schedule node deletion with a long expiry
gc.Schedule(types.NodeID(1), longExpiry)
// Close the GC before the timer
gc.Close()
// Wait a short time
time.Sleep(shortExpiry * 2)
// Verify that no deletion occurred
deleteMutex.Lock()
assert.Equal(t, 0, len(deletedIDs), "No node should be deleted when GC is closed before timer fires")
deleteMutex.Unlock()
}
// TestEphemeralGarbageCollectorScheduleAfterClose verifies that calling Schedule after Close
// is a no-op and doesn't cause any panics, goroutine leaks, or other issues.
func TestEphemeralGarbageCollectorScheduleAfterClose(t *testing.T) {
// Count initial goroutines to check for leaks
initialGoroutines := runtime.NumGoroutine()
t.Logf("Initial number of goroutines: %d", initialGoroutines)
// Deletion tracking
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
nodeDeleted := make(chan struct{})
deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()
deletedIDs = append(deletedIDs, nodeID)
deleteMutex.Unlock()
close(nodeDeleted) // Signal that deletion happened
}
// Start new GC
gc := NewEphemeralGarbageCollector(deleteFunc)
// Use a WaitGroup to ensure the GC has started
var startWg sync.WaitGroup
startWg.Add(1)
go func() {
startWg.Done() // Signal that the goroutine has started
gc.Start()
}()
startWg.Wait() // Wait for the GC to start
// Close GC right away
gc.Close()
// Use a channel to signal when we should check for goroutine count
gcClosedCheck := make(chan struct{})
go func() {
// Give the GC time to fully close and clean up resources
// This is still time-based but only affects when we check the goroutine count,
// not the actual test logic
time.Sleep(oneHundredMillis)
close(gcClosedCheck)
}()
// Now try to schedule node for deletion with a very short expiry
// If the Schedule operation incorrectly creates a timer, it would fire quickly
nodeID := types.NodeID(1)
gc.Schedule(nodeID, 1*time.Millisecond)
// Set up a timeout channel for our test
timeout := time.After(fiveHundredMillis)
// Check if any node was deleted (which shouldn't happen)
select {
case <-nodeDeleted:
t.Fatal("Node was deleted after GC was closed, which should not happen")
case <-timeout:
// This is the expected path - no deletion should occur
}
// Check no node was deleted
deleteMutex.Lock()
nodesDeleted := len(deletedIDs)
deleteMutex.Unlock()
assert.Equal(t, 0, nodesDeleted, "No nodes should be deleted when Schedule is called after Close")
// Check for goroutine leaks after GC is fully closed
<-gcClosedCheck
finalGoroutines := runtime.NumGoroutine()
t.Logf("Final number of goroutines: %d", finalGoroutines)
// Allow for small fluctuations in goroutine count for testing routines etc
assert.LessOrEqual(t, finalGoroutines, initialGoroutines+2,
"There should be no significant goroutine leaks when Schedule is called after Close")
}
// TestEphemeralGarbageCollectorConcurrentScheduleAndClose tests the behavior of the garbage collector
// when Schedule and Close are called concurrently from multiple goroutines.
func TestEphemeralGarbageCollectorConcurrentScheduleAndClose(t *testing.T) {
// Count initial goroutines
initialGoroutines := runtime.NumGoroutine()
t.Logf("Initial number of goroutines: %d", initialGoroutines)
// Deletion tracking mechanism
var deletedIDs []types.NodeID
var deleteMutex sync.Mutex
deleteFunc := func(nodeID types.NodeID) {
deleteMutex.Lock()
deletedIDs = append(deletedIDs, nodeID)
deleteMutex.Unlock()
}
// Start the GC
gc := NewEphemeralGarbageCollector(deleteFunc)
go gc.Start()
// Number of concurrent scheduling goroutines
const numSchedulers = 10
const nodesPerScheduler = 50
const schedulingDuration = fiveHundredMillis
// Use WaitGroup to wait for all scheduling goroutines to finish
var wg sync.WaitGroup
wg.Add(numSchedulers + 1) // +1 for the closer goroutine
// Create a stopper channel to signal scheduling goroutines to stop
stopScheduling := make(chan struct{})
// Launch goroutines that continuously schedule nodes
for i := 0; i < numSchedulers; i++ {
go func(schedulerID int) {
defer wg.Done()
baseNodeID := schedulerID * nodesPerScheduler
// Keep scheduling nodes until signaled to stop
for j := 0; j < nodesPerScheduler; j++ {
select {
case <-stopScheduling:
return
default:
nodeID := types.NodeID(baseNodeID + j + 1)
gc.Schedule(nodeID, 1*time.Hour) // Long expiry to ensure it doesn't trigger during test
// Random (short) sleep to introduce randomness/variability
time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond)
}
}
}(i)
}
// After a short delay, close the garbage collector while schedulers are still running
go func() {
defer wg.Done()
time.Sleep(schedulingDuration / 2)
// Close GC
gc.Close()
// Signal schedulers to stop
close(stopScheduling)
}()
// Wait for all goroutines to complete
wg.Wait()
// Wait a bit longer to allow any leaked goroutines to do their work
time.Sleep(oneHundredMillis)
// Check for leaks
finalGoroutines := runtime.NumGoroutine()
t.Logf("Final number of goroutines: %d", finalGoroutines)
// Allow for a reasonably small variation in goroutine count due to test-harness goroutines
assert.LessOrEqual(t, finalGoroutines, initialGoroutines+5,
"There should be no significant goroutine leaks during concurrent Schedule and Close operations")
}

View File

@@ -91,7 +91,7 @@ func TestIPAllocatorSequential(t *testing.T) {
{
name: "simple-with-db",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "simple-with-db")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -119,7 +119,7 @@ func TestIPAllocatorSequential(t *testing.T) {
{
name: "before-after-free-middle-in-db",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "before-after-free-middle-in-db")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -309,7 +309,7 @@ func TestBackfillIPAddresses(t *testing.T) {
{
name: "simple-backfill-ipv6",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "simple-backfill-ipv6")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -334,7 +334,7 @@ func TestBackfillIPAddresses(t *testing.T) {
{
name: "simple-backfill-ipv4",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "simple-backfill-ipv4")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -359,7 +359,7 @@ func TestBackfillIPAddresses(t *testing.T) {
{
name: "simple-backfill-remove-ipv6",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "simple-backfill-remove-ipv6")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -383,7 +383,7 @@ func TestBackfillIPAddresses(t *testing.T) {
{
name: "simple-backfill-remove-ipv4",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "simple-backfill-remove-ipv4")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -407,7 +407,7 @@ func TestBackfillIPAddresses(t *testing.T) {
{
name: "multi-backfill-ipv6",
dbFunc: func() *HSDatabase {
db := dbForTest(t)
db := dbForTest(t, "simple-backfill-ipv6")
user := types.User{Name: ""}
db.DB.Save(&user)
@@ -449,6 +449,7 @@ func TestBackfillIPAddresses(t *testing.T) {
"UserID",
"Endpoints",
"Hostinfo",
"Routes",
"CreatedAt",
"UpdatedAt",
))
@@ -487,10 +488,6 @@ func TestBackfillIPAddresses(t *testing.T) {
}
func TestIPAllocatorNextNoReservedIPs(t *testing.T) {
db, err := newSQLiteTestDB()
require.NoError(t, err)
defer db.Close()
alloc, err := NewIPAllocator(
db,
ptr.To(tsaddr.CGNATRange()),

View File

@@ -12,6 +12,7 @@ import (
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/tailcfg"
@@ -35,26 +36,22 @@ var (
)
)
// ListPeers returns peers of node, regardless of any Policy or if the node is expired.
// If no peer IDs are given, all peers are returned.
// If at least one peer ID is given, only these peer nodes will be returned.
func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
func (hsdb *HSDatabase) ListPeers(nodeID types.NodeID) (types.Nodes, error) {
return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
return ListPeers(rx, nodeID, peerIDs...)
return ListPeers(rx, nodeID)
})
}
// ListPeers returns peers of node, regardless of any Policy or if the node is expired.
// If no peer IDs are given, all peers are returned.
// If at least one peer ID is given, only these peer nodes will be returned.
func ListPeers(tx *gorm.DB, nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
// ListPeers returns all peers of node, regardless of any Policy or if the node is expired.
func ListPeers(tx *gorm.DB, nodeID types.NodeID) (types.Nodes, error) {
nodes := types.Nodes{}
if err := tx.
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Where("id <> ?", nodeID).
Where(peerIDs).Find(&nodes).Error; err != nil {
Preload("Routes").
Where("id <> ?",
nodeID).Find(&nodes).Error; err != nil {
return types.Nodes{}, err
}
@@ -63,23 +60,20 @@ func ListPeers(tx *gorm.DB, nodeID types.NodeID, peerIDs ...types.NodeID) (types
return nodes, nil
}
// ListNodes queries the database for either all nodes if no parameters are given
// or for the given nodes if at least one node ID is given as a parameter.
func (hsdb *HSDatabase) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {
func (hsdb *HSDatabase) ListNodes() (types.Nodes, error) {
return Read(hsdb.DB, func(rx *gorm.DB) (types.Nodes, error) {
return ListNodes(rx, nodeIDs...)
return ListNodes(rx)
})
}
// ListNodes queries the database for either all nodes if no parameters are given
// or for the given nodes if at least one node ID is given as a parameter.
func ListNodes(tx *gorm.DB, nodeIDs ...types.NodeID) (types.Nodes, error) {
func ListNodes(tx *gorm.DB) (types.Nodes, error) {
nodes := types.Nodes{}
if err := tx.
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Where(nodeIDs).Find(&nodes).Error; err != nil {
Preload("Routes").
Find(&nodes).Error; err != nil {
return nil, err
}
@@ -132,6 +126,7 @@ func GetNodeByID(tx *gorm.DB, id types.NodeID) (*types.Node, error) {
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Preload("Routes").
Find(&types.Node{ID: id}).First(&mach); result.Error != nil {
return nil, result.Error
}
@@ -155,6 +150,7 @@ func GetNodeByMachineKey(
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Preload("Routes").
First(&mach, "machine_key = ?", machineKey.String()); result.Error != nil {
return nil, result.Error
}
@@ -178,6 +174,7 @@ func GetNodeByNodeKey(
Preload("AuthKey").
Preload("AuthKey.User").
Preload("User").
Preload("Routes").
First(&mach, "node_key = ?", nodeKey.String()); result.Error != nil {
return nil, result.Error
}
@@ -194,8 +191,7 @@ func (hsdb *HSDatabase) SetTags(
})
}
// SetTags takes a NodeID and updates the forced tags.
// It will overwrite any tags with the new list.
// SetTags takes a Node struct pointer and updates the forced tags.
func SetTags(
tx *gorm.DB,
nodeID types.NodeID,
@@ -204,67 +200,31 @@ func SetTags(
if len(tags) == 0 {
// if no tags are provided, we remove all forced tags
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("forced_tags", "[]").Error; err != nil {
return fmt.Errorf("removing tags: %w", err)
return fmt.Errorf("failed to remove tags for node in the database: %w", err)
}
return nil
}
slices.Sort(tags)
tags = slices.Compact(tags)
b, err := json.Marshal(tags)
var newTags []string
for _, tag := range tags {
if !slices.Contains(newTags, tag) {
newTags = append(newTags, tag)
}
}
b, err := json.Marshal(newTags)
if err != nil {
return err
}
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("forced_tags", string(b)).Error; err != nil {
return fmt.Errorf("updating tags: %w", err)
return fmt.Errorf("failed to update tags for node in the database: %w", err)
}
return nil
}
// SetApprovedRoutes takes a NodeID and updates the approved routes.
func SetApprovedRoutes(
tx *gorm.DB,
nodeID types.NodeID,
routes []netip.Prefix,
) error {
if len(routes) == 0 {
// if no routes are provided, we remove all
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", "[]").Error; err != nil {
return fmt.Errorf("removing approved routes: %w", err)
}
return nil
}
b, err := json.Marshal(routes)
if err != nil {
return err
}
if err := tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("approved_routes", string(b)).Error; err != nil {
return fmt.Errorf("updating approved routes: %w", err)
}
return nil
}
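A minimal usage sketch, assuming the transaction-level signature shown above; hsdb stands in for the caller's *HSDatabase, and the node ID and prefix are illustrative values, not taken from this diff.
// Hedged sketch: persist an approved subnet route for a node inside a write transaction,
// mirroring the HSDatabase.Write wrapper pattern used by SetLastSeen below.
if err := hsdb.Write(func(tx *gorm.DB) error {
	return SetApprovedRoutes(tx, types.NodeID(1), []netip.Prefix{
		netip.MustParsePrefix("10.0.0.0/24"),
	})
}); err != nil {
	return fmt.Errorf("approving routes: %w", err)
}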
// SetLastSeen sets a node's last seen field indicating that we
// have recently communicated with this node.
func (hsdb *HSDatabase) SetLastSeen(nodeID types.NodeID, lastSeen time.Time) error {
return hsdb.Write(func(tx *gorm.DB) error {
return SetLastSeen(tx, nodeID, lastSeen)
})
}
// SetLastSeen sets a node's last seen field indicating that we
// have recently communicated with this node.
func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("last_seen", lastSeen).Error
}
// RenameNode takes a Node struct and a new GivenName for the nodes
// and renames it. If the name is not unique, it will return an error.
func RenameNode(tx *gorm.DB,
@@ -306,9 +266,9 @@ func NodeSetExpiry(tx *gorm.DB,
return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("expiry", expiry).Error
}
func (hsdb *HSDatabase) DeleteNode(node *types.Node) error {
return hsdb.Write(func(tx *gorm.DB) error {
return DeleteNode(tx, node)
func (hsdb *HSDatabase) DeleteNode(node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) {
return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
return DeleteNode(tx, node, isLikelyConnected)
})
}
@@ -316,13 +276,19 @@ func (hsdb *HSDatabase) DeleteNode(node *types.Node) error {
// Caller is responsible for notifying all of change.
func DeleteNode(tx *gorm.DB,
node *types.Node,
) error {
// Unscoped causes the node to be fully removed from the database.
if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil {
return err
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
) ([]types.NodeID, error) {
changed, err := deleteNodeRoutes(tx, node, isLikelyConnected)
if err != nil {
return changed, err
}
return nil
// Unscoped causes the node to be fully removed from the database.
if err := tx.Unscoped().Delete(&types.Node{}, node.ID).Error; err != nil {
return changed, err
}
return changed, nil
}
// DeleteEphemeralNode deletes a Node from the database, note that this method
@@ -339,6 +305,12 @@ func (hsdb *HSDatabase) DeleteEphemeralNode(
})
}
// SetLastSeen sets a node's last seen field indicating that we
// have recently communicated with this node.
func SetLastSeen(tx *gorm.DB, nodeID types.NodeID, lastSeen time.Time) error {
return tx.Model(&types.Node{}).Where("id = ?", nodeID).Update("last_seen", lastSeen).Error
}
// HandleNodeFromAuthPath is called from the OIDC or CLI auth path
// with a registrationID to register or reauthenticate a node.
// If the node found in the registration cache is not already registered,
@@ -399,12 +371,7 @@ func (hsdb *HSDatabase) HandleNodeFromAuthPath(
}
// Signal to waiting clients that the machine has been registered.
select {
case reg.Registered <- node:
default:
}
close(reg.Registered)
newNode = true
return node, err
} else {
@@ -527,6 +494,145 @@ func NodeSave(tx *gorm.DB, node *types.Node) error {
return tx.Save(node).Error
}
func (hsdb *HSDatabase) GetAdvertisedRoutes(node *types.Node) ([]netip.Prefix, error) {
return Read(hsdb.DB, func(rx *gorm.DB) ([]netip.Prefix, error) {
return GetAdvertisedRoutes(rx, node)
})
}
// GetAdvertisedRoutes returns the routes that are advertised by the given node.
func GetAdvertisedRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) {
routes := types.Routes{}
err := tx.
Preload("Node").
Where("node_id = ? AND advertised = ?", node.ID, true).Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return nil, fmt.Errorf("getting advertised routes for node(%d): %w", node.ID, err)
}
var prefixes []netip.Prefix
for _, route := range routes {
prefixes = append(prefixes, netip.Prefix(route.Prefix))
}
return prefixes, nil
}
func (hsdb *HSDatabase) GetEnabledRoutes(node *types.Node) ([]netip.Prefix, error) {
return Read(hsdb.DB, func(rx *gorm.DB) ([]netip.Prefix, error) {
return GetEnabledRoutes(rx, node)
})
}
// GetEnabledRoutes returns the routes that are enabled for the node.
func GetEnabledRoutes(tx *gorm.DB, node *types.Node) ([]netip.Prefix, error) {
routes := types.Routes{}
err := tx.
Preload("Node").
Where("node_id = ? AND advertised = ? AND enabled = ?", node.ID, true, true).
Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return nil, fmt.Errorf("getting enabled routes for node(%d): %w", node.ID, err)
}
var prefixes []netip.Prefix
for _, route := range routes {
prefixes = append(prefixes, netip.Prefix(route.Prefix))
}
return prefixes, nil
}
func IsRoutesEnabled(tx *gorm.DB, node *types.Node, routeStr string) bool {
route, err := netip.ParsePrefix(routeStr)
if err != nil {
return false
}
enabledRoutes, err := GetEnabledRoutes(tx, node)
if err != nil {
return false
}
for _, enabledRoute := range enabledRoutes {
if route == enabledRoute {
return true
}
}
return false
}
func (hsdb *HSDatabase) enableRoutes(
node *types.Node,
newRoutes ...netip.Prefix,
) (*types.StateUpdate, error) {
return Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
return enableRoutes(tx, node, newRoutes...)
})
}
// enableRoutes enables new routes based on a list of new routes.
func enableRoutes(tx *gorm.DB,
node *types.Node, newRoutes ...netip.Prefix,
) (*types.StateUpdate, error) {
advertisedRoutes, err := GetAdvertisedRoutes(tx, node)
if err != nil {
return nil, err
}
for _, newRoute := range newRoutes {
if !slices.Contains(advertisedRoutes, newRoute) {
return nil, fmt.Errorf(
"route (%s) is not available on node %s: %w",
newRoute,
node.Hostname, ErrNodeRouteIsNotAvailable,
)
}
}
// Separate loop so we don't leave things in a half-updated state
for _, prefix := range newRoutes {
route := types.Route{}
err := tx.Preload("Node").
Where("node_id = ? AND prefix = ?", node.ID, prefix.String()).
First(&route).Error
if err == nil {
route.Enabled = true
// Mark it as primary right away if this node is the only one offering this subnet
// (and it is not an exit route)
if !route.IsExitRoute() {
route.IsPrimary = isUniquePrefix(tx, route)
}
err = tx.Save(&route).Error
if err != nil {
return nil, fmt.Errorf("failed to enable route: %w", err)
}
} else {
return nil, fmt.Errorf("failed to find route: %w", err)
}
}
// Ensure the node has the latest routes when notifying the other
// nodes
nRoutes, err := GetNodeRoutes(tx, node)
if err != nil {
return nil, fmt.Errorf("failed to read back routes: %w", err)
}
node.Routes = nRoutes
return &types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: []types.NodeID{node.ID},
Message: "created in db.enableRoutes",
}, nil
}
func generateGivenName(suppliedName string, randomSuffix bool) (string, error) {
suppliedName = util.ConvertWithFQDNRules(suppliedName)
if len(suppliedName) > util.LabelHostnameLength {
@@ -587,9 +693,6 @@ func ensureUniqueGivenName(
return givenName, nil
}
// ExpireExpiredNodes checks for nodes that have expired since the last check
// and returns a time to be used for the next check, a StateUpdate
// containing the expired nodes, and a boolean indicating if any nodes were found.
func ExpireExpiredNodes(tx *gorm.DB,
lastCheck time.Time,
) (time.Time, types.StateUpdate, bool) {
@@ -614,7 +717,10 @@ func ExpireExpiredNodes(tx *gorm.DB,
}
if len(expired) > 0 {
return started, types.UpdatePeerPatch(expired...), true
return started, types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: expired,
}, true
}
return started, types.StateUpdate{}, false
@@ -647,59 +753,22 @@ func NewEphemeralGarbageCollector(deleteFunc func(types.NodeID)) *EphemeralGarba
// Close stops the garbage collector.
func (e *EphemeralGarbageCollector) Close() {
e.mu.Lock()
defer e.mu.Unlock()
// Stop all timers
for _, timer := range e.toBeDeleted {
timer.Stop()
}
// Close the cancel channel to signal all goroutines to exit
close(e.cancelCh)
e.cancelCh <- struct{}{}
}
// Schedule schedules a node for deletion after the expiry duration.
// If the garbage collector is already closed, this is a no-op.
func (e *EphemeralGarbageCollector) Schedule(nodeID types.NodeID, expiry time.Duration) {
e.mu.Lock()
defer e.mu.Unlock()
// Don't schedule new timers if the garbage collector is already closed
select {
case <-e.cancelCh:
// The cancel channel is closed, meaning the GC is shutting down
// or already shut down, so we shouldn't schedule anything new
return
default:
// Continue with scheduling
}
// If a timer already exists for this node, stop it first
if oldTimer, exists := e.toBeDeleted[nodeID]; exists {
oldTimer.Stop()
}
timer := time.NewTimer(expiry)
e.toBeDeleted[nodeID] = timer
// Start a goroutine to handle the timer completion
e.mu.Unlock()
go func() {
select {
case <-timer.C:
// This is to handle the situation where the GC is shutting down and
// we are trying to schedule a new node for deletion at the same time
// i.e. We don't want to send to deleteCh if the GC is shutting down
// So, we try to send to deleteCh, but also watch for cancelCh
select {
case e.deleteCh <- nodeID:
// Successfully sent to deleteCh
case <-e.cancelCh:
// GC is shutting down, don't send to deleteCh
return
case _, ok := <-timer.C:
if ok {
e.deleteCh <- nodeID
}
case <-e.cancelCh:
// If the GC is closed, exit the goroutine
return
}
}()
}
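A brief usage sketch of the collector as it is exercised by the tests above; the callback body is a hypothetical stand-in for the caller's delete logic.
// Hedged sketch: wire up the collector, schedule a deletion, and shut down cleanly.
gc := NewEphemeralGarbageCollector(func(id types.NodeID) {
	// Hypothetical callback: the real caller deletes the ephemeral node here.
	log.Info().Uint64("node.id", id.Uint64()).Msg("ephemeral node expired")
})
go gc.Start()
defer gc.Close()
// Rescheduling the same ID replaces the earlier timer; Cancel drops it entirely.
gc.Schedule(types.NodeID(1), 30*time.Minute)
gc.Cancel(types.NodeID(1))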

View File

@@ -15,6 +15,7 @@ import (
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/puzpuzpuz/xsync/v3"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/check.v1"
@@ -101,7 +102,7 @@ func (s *Suite) TestHardDeleteNode(c *check.C) {
trx := db.DB.Save(&node)
c.Assert(trx.Error, check.IsNil)
err = db.DeleteNode(&node)
_, err = db.DeleteNode(&node, xsync.NewMapOf[types.NodeID, bool]())
c.Assert(err, check.IsNil)
_, err = db.getNode(types.UserID(user.ID), "testnode3")
@@ -147,6 +148,105 @@ func (s *Suite) TestListPeers(c *check.C) {
c.Assert(peersOfNode0[8].Hostname, check.Equals, "testnode10")
}
func (s *Suite) TestGetACLFilteredPeers(c *check.C) {
type base struct {
user *types.User
key *types.PreAuthKey
}
stor := make([]base, 0)
for _, name := range []string{"test", "admin"} {
user, err := db.CreateUser(types.User{Name: name})
c.Assert(err, check.IsNil)
pak, err := db.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil)
c.Assert(err, check.IsNil)
stor = append(stor, base{user, pak})
}
_, err := db.GetNodeByID(0)
c.Assert(err, check.NotNil)
for index := 0; index <= 10; index++ {
nodeKey := key.NewNode()
machineKey := key.NewMachine()
v4 := netip.MustParseAddr(fmt.Sprintf("100.64.0.%d", index+1))
node := types.Node{
ID: types.NodeID(index),
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
IPv4: &v4,
Hostname: "testnode" + strconv.Itoa(index),
UserID: stor[index%2].user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(stor[index%2].key.ID),
}
trx := db.DB.Save(&node)
c.Assert(trx.Error, check.IsNil)
}
aclPolicy := &policy.ACLPolicy{
Groups: map[string][]string{
"group:test": {"admin"},
},
Hosts: map[string]netip.Prefix{},
TagOwners: map[string][]string{},
ACLs: []policy.ACL{
{
Action: "accept",
Sources: []string{"admin"},
Destinations: []string{"*:*"},
},
{
Action: "accept",
Sources: []string{"test"},
Destinations: []string{"test:*"},
},
},
Tests: []policy.ACLTest{},
}
adminNode, err := db.GetNodeByID(1)
c.Logf("Node(%v), user: %v", adminNode.Hostname, adminNode.User)
c.Assert(adminNode.IPv4, check.NotNil)
c.Assert(adminNode.IPv6, check.IsNil)
c.Assert(err, check.IsNil)
testNode, err := db.GetNodeByID(2)
c.Logf("Node(%v), user: %v", testNode.Hostname, testNode.User)
c.Assert(err, check.IsNil)
adminPeers, err := db.ListPeers(adminNode.ID)
c.Assert(err, check.IsNil)
c.Assert(len(adminPeers), check.Equals, 9)
testPeers, err := db.ListPeers(testNode.ID)
c.Assert(err, check.IsNil)
c.Assert(len(testPeers), check.Equals, 9)
adminRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, adminNode, adminPeers, []types.User{*stor[0].user, *stor[1].user})
c.Assert(err, check.IsNil)
testRules, _, err := policy.GenerateFilterAndSSHRulesForTests(aclPolicy, testNode, testPeers, []types.User{*stor[0].user, *stor[1].user})
c.Assert(err, check.IsNil)
peersOfAdminNode := policy.FilterNodesByACL(adminNode, adminPeers, adminRules)
peersOfTestNode := policy.FilterNodesByACL(testNode, testPeers, testRules)
c.Log(peersOfAdminNode)
c.Log(peersOfTestNode)
c.Assert(len(peersOfTestNode), check.Equals, 9)
c.Assert(peersOfTestNode[0].Hostname, check.Equals, "testnode1")
c.Assert(peersOfTestNode[1].Hostname, check.Equals, "testnode3")
c.Assert(peersOfTestNode[3].Hostname, check.Equals, "testnode5")
c.Assert(len(peersOfAdminNode), check.Equals, 9)
c.Assert(peersOfAdminNode[0].Hostname, check.Equals, "testnode2")
c.Assert(peersOfAdminNode[2].Hostname, check.Equals, "testnode4")
c.Assert(peersOfAdminNode[5].Hostname, check.Equals, "testnode7")
}
func (s *Suite) TestExpireNode(c *check.C) {
user, err := db.CreateUser(types.User{Name: "test"})
c.Assert(err, check.IsNil)
@@ -364,23 +464,22 @@ func TestAutoApproveRoutes(t *testing.T) {
acl string
routes []netip.Prefix
want []netip.Prefix
want2 []netip.Prefix
}{
{
name: "2068-approve-issue-sub-kube",
name: "2068-approve-issue-sub",
acl: `
{
"groups": {
"group:k8s": ["test@"]
"group:k8s": ["test"]
},
// "acls": [
// {"action": "accept", "users": ["*"], "ports": ["*:*"]},
// ],
"acls": [
{"action": "accept", "users": ["*"], "ports": ["*:*"]},
],
"autoApprovers": {
"routes": {
"10.42.0.0/16": ["test@"],
"10.42.0.0/16": ["test"],
}
}
}`,
@@ -388,27 +487,26 @@ func TestAutoApproveRoutes(t *testing.T) {
want: []netip.Prefix{netip.MustParsePrefix("10.42.7.0/24")},
},
{
name: "2068-approve-issue-sub-exit-tag",
name: "2068-approve-issue-sub",
acl: `
{
"tagOwners": {
"tag:exit": ["test@"],
"tag:exit": ["test"],
},
"groups": {
"group:test": ["test@"]
"group:test": ["test"]
},
// "acls": [
// {"action": "accept", "users": ["*"], "ports": ["*:*"]},
// ],
"acls": [
{"action": "accept", "users": ["*"], "ports": ["*:*"]},
],
"autoApprovers": {
"exitNode": ["tag:exit"],
"routes": {
"10.10.0.0/16": ["group:test"],
"10.11.0.0/16": ["test@"],
"8.11.0.0/24": ["test2@"], // No nodes
"10.11.0.0/16": ["test"],
}
}
}`,
@@ -417,104 +515,83 @@ func TestAutoApproveRoutes(t *testing.T) {
tsaddr.AllIPv6(),
netip.MustParsePrefix("10.10.0.0/16"),
netip.MustParsePrefix("10.11.0.0/24"),
// Not approved
netip.MustParsePrefix("8.11.0.0/24"),
},
want: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("10.10.0.0/16"),
netip.MustParsePrefix("10.11.0.0/24"),
},
want2: []netip.Prefix{
tsaddr.AllIPv4(),
tsaddr.AllIPv6(),
},
},
}
for _, tt := range tests {
pmfs := policy.PolicyManagerFuncsForTest([]byte(tt.acl))
for i, pmf := range pmfs {
t.Run(fmt.Sprintf("%s-policy-index%d", tt.name, i), func(t *testing.T) {
adb, err := newSQLiteTestDB()
require.NoError(t, err)
t.Run(tt.name, func(t *testing.T) {
adb, err := newSQLiteTestDB()
require.NoError(t, err)
pol, err := policy.LoadACLPolicyFromBytes([]byte(tt.acl))
user, err := adb.CreateUser(types.User{Name: "test"})
require.NoError(t, err)
_, err = adb.CreateUser(types.User{Name: "test2"})
require.NoError(t, err)
taggedUser, err := adb.CreateUser(types.User{Name: "tagged"})
require.NoError(t, err)
require.NoError(t, err)
require.NotNil(t, pol)
node := types.Node{
ID: 1,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "testnode",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.routes,
},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.1")),
}
user, err := adb.CreateUser(types.User{Name: "test"})
require.NoError(t, err)
err = adb.DB.Save(&node).Error
require.NoError(t, err)
pak, err := adb.CreatePreAuthKey(types.UserID(user.ID), false, false, nil, nil)
require.NoError(t, err)
nodeTagged := types.Node{
ID: 2,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "taggednode",
UserID: taggedUser.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: tt.routes,
},
ForcedTags: []string{"tag:exit"},
IPv4: ptr.To(netip.MustParseAddr("100.64.0.2")),
}
nodeKey := key.NewNode()
machineKey := key.NewMachine()
err = adb.DB.Save(&nodeTagged).Error
require.NoError(t, err)
v4 := netip.MustParseAddr("100.64.0.1")
node := types.Node{
ID: 0,
MachineKey: machineKey.Public(),
NodeKey: nodeKey.Public(),
Hostname: "test",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
AuthKeyID: ptr.To(pak.ID),
Hostinfo: &tailcfg.Hostinfo{
RequestTags: []string{"tag:exit"},
RoutableIPs: tt.routes,
},
IPv4: &v4,
}
users, err := adb.ListUsers()
assert.NoError(t, err)
trx := adb.DB.Save(&node)
require.NoError(t, trx.Error)
nodes, err := adb.ListNodes()
assert.NoError(t, err)
sendUpdate, err := adb.SaveNodeRoutes(&node)
require.NoError(t, err)
assert.False(t, sendUpdate)
pm, err := pmf(users, nodes)
require.NoError(t, err)
require.NotNil(t, pm)
node0ByID, err := adb.GetNodeByID(0)
require.NoError(t, err)
changed1 := policy.AutoApproveRoutes(pm, &node)
assert.True(t, changed1)
users, err := adb.ListUsers()
assert.NoError(t, err)
err = adb.DB.Save(&node).Error
require.NoError(t, err)
nodes, err := adb.ListNodes()
assert.NoError(t, err)
_ = policy.AutoApproveRoutes(pm, &nodeTagged)
pm, err := policy.NewPolicyManager([]byte(tt.acl), users, nodes)
assert.NoError(t, err)
err = adb.DB.Save(&nodeTagged).Error
require.NoError(t, err)
// TODO(kradalby): Check state update
err = adb.EnableAutoApprovedRoutes(pm, node0ByID)
require.NoError(t, err)
node1ByID, err := adb.GetNodeByID(1)
require.NoError(t, err)
enabledRoutes, err := adb.GetEnabledRoutes(node0ByID)
require.NoError(t, err)
assert.Len(t, enabledRoutes, len(tt.want))
if diff := cmp.Diff(tt.want, node1ByID.SubnetRoutes(), util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
tsaddr.SortPrefixes(enabledRoutes)
node2ByID, err := adb.GetNodeByID(2)
require.NoError(t, err)
if diff := cmp.Diff(tt.want2, node2ByID.SubnetRoutes(), util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
})
}
if diff := cmp.Diff(tt.want, enabledRoutes, util.Comparers...); diff != "" {
t.Errorf("unexpected enabled routes (-want +got):\n%s", diff)
}
})
}
}
@@ -566,7 +643,7 @@ func TestEphemeralGarbageCollectorLoads(t *testing.T) {
})
go e.Start()
for i := range want {
for i := 0; i < want; i++ {
go e.Schedule(types.NodeID(i), 1*time.Second)
}
@@ -667,7 +744,6 @@ func TestRenameNode(t *testing.T) {
Hostname: "test",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
node2 := types.Node{
@@ -677,7 +753,6 @@ func TestRenameNode(t *testing.T) {
Hostname: "test",
UserID: user2.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
err = db.DB.Save(&node).Error
@@ -746,174 +821,3 @@ func TestRenameNode(t *testing.T) {
})
assert.ErrorContains(t, err, "name is not unique")
}
func TestListPeers(t *testing.T) {
// Setup test database
db, err := newSQLiteTestDB()
if err != nil {
t.Fatalf("creating db: %s", err)
}
user, err := db.CreateUser(types.User{Name: "test"})
require.NoError(t, err)
user2, err := db.CreateUser(types.User{Name: "user2"})
require.NoError(t, err)
node1 := types.Node{
ID: 0,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "test1",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
node2 := types.Node{
ID: 0,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "test2",
UserID: user2.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
err = db.DB.Save(&node1).Error
require.NoError(t, err)
err = db.DB.Save(&node2).Error
require.NoError(t, err)
err = db.DB.Transaction(func(tx *gorm.DB) error {
_, err := RegisterNode(tx, node1, nil, nil)
if err != nil {
return err
}
_, err = RegisterNode(tx, node2, nil, nil)
return err
})
require.NoError(t, err)
nodes, err := db.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 2)
// No parameter means no filter, should return all peers
nodes, err = db.ListPeers(1)
require.NoError(t, err)
assert.Equal(t, len(nodes), 1)
assert.Equal(t, "test2", nodes[0].Hostname)
// Empty node list should return all peers
nodes, err = db.ListPeers(1, types.NodeIDs{}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 1)
assert.Equal(t, "test2", nodes[0].Hostname)
// No match in IDs should return empty list and no error
nodes, err = db.ListPeers(1, types.NodeIDs{3, 4, 5}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 0)
// Partial match in IDs
nodes, err = db.ListPeers(1, types.NodeIDs{2, 3}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 1)
assert.Equal(t, "test2", nodes[0].Hostname)
// Several matched IDs, but node ID is still filtered out
nodes, err = db.ListPeers(1, types.NodeIDs{1, 2, 3}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 1)
assert.Equal(t, "test2", nodes[0].Hostname)
}
func TestListNodes(t *testing.T) {
// Setup test database
db, err := newSQLiteTestDB()
if err != nil {
t.Fatalf("creating db: %s", err)
}
user, err := db.CreateUser(types.User{Name: "test"})
require.NoError(t, err)
user2, err := db.CreateUser(types.User{Name: "user2"})
require.NoError(t, err)
node1 := types.Node{
ID: 0,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "test1",
UserID: user.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
node2 := types.Node{
ID: 0,
MachineKey: key.NewMachine().Public(),
NodeKey: key.NewNode().Public(),
Hostname: "test2",
UserID: user2.ID,
RegisterMethod: util.RegisterMethodAuthKey,
Hostinfo: &tailcfg.Hostinfo{},
}
err = db.DB.Save(&node1).Error
require.NoError(t, err)
err = db.DB.Save(&node2).Error
require.NoError(t, err)
err = db.DB.Transaction(func(tx *gorm.DB) error {
_, err := RegisterNode(tx, node1, nil, nil)
if err != nil {
return err
}
_, err = RegisterNode(tx, node2, nil, nil)
return err
})
require.NoError(t, err)
nodes, err := db.ListNodes()
require.NoError(t, err)
assert.Len(t, nodes, 2)
// No parameter means no filter, should return all nodes
nodes, err = db.ListNodes()
require.NoError(t, err)
assert.Equal(t, len(nodes), 2)
assert.Equal(t, "test1", nodes[0].Hostname)
assert.Equal(t, "test2", nodes[1].Hostname)
// Empty node list should return all nodes
nodes, err = db.ListNodes(types.NodeIDs{}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 2)
assert.Equal(t, "test1", nodes[0].Hostname)
assert.Equal(t, "test2", nodes[1].Hostname)
// No match in IDs should return empty list and no error
nodes, err = db.ListNodes(types.NodeIDs{3, 4, 5}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 0)
// Partial match in IDs
nodes, err = db.ListNodes(types.NodeIDs{2, 3}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 1)
assert.Equal(t, "test2", nodes[0].Hostname)
// Several matched IDs
nodes, err = db.ListNodes(types.NodeIDs{1, 2, 3}...)
require.NoError(t, err)
assert.Equal(t, len(nodes), 2)
assert.Equal(t, "test1", nodes[0].Hostname)
assert.Equal(t, "test2", nodes[1].Hostname)
}

679
hscontrol/db/routes.go Normal file
View File

@@ -0,0 +1,679 @@
package db
import (
"errors"
"fmt"
"net/netip"
"sort"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/util/set"
)
var ErrRouteIsNotAvailable = errors.New("route is not available")
func GetRoutes(tx *gorm.DB) (types.Routes, error) {
var routes types.Routes
err := tx.
Preload("Node").
Preload("Node.User").
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func getAdvertisedAndEnabledRoutes(tx *gorm.DB) (types.Routes, error) {
var routes types.Routes
err := tx.
Preload("Node").
Preload("Node.User").
Where("advertised = ? AND enabled = ?", true, true).
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func getRoutesByPrefix(tx *gorm.DB, pref netip.Prefix) (types.Routes, error) {
var routes types.Routes
err := tx.
Preload("Node").
Preload("Node.User").
Where("prefix = ?", pref.String()).
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func GetNodeAdvertisedRoutes(tx *gorm.DB, node *types.Node) (types.Routes, error) {
var routes types.Routes
err := tx.
Preload("Node").
Preload("Node.User").
Where("node_id = ? AND advertised = true", node.ID).
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func (hsdb *HSDatabase) GetNodeRoutes(node *types.Node) (types.Routes, error) {
return Read(hsdb.DB, func(rx *gorm.DB) (types.Routes, error) {
return GetNodeRoutes(rx, node)
})
}
func GetNodeRoutes(tx *gorm.DB, node *types.Node) (types.Routes, error) {
var routes types.Routes
err := tx.
Preload("Node").
Preload("Node.User").
Where("node_id = ?", node.ID).
Find(&routes).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return nil, err
}
return routes, nil
}
func GetRoute(tx *gorm.DB, id uint64) (*types.Route, error) {
var route types.Route
err := tx.
Preload("Node").
Preload("Node.User").
First(&route, id).Error
if err != nil {
return nil, err
}
return &route, nil
}
func EnableRoute(tx *gorm.DB, id uint64) (*types.StateUpdate, error) {
route, err := GetRoute(tx, id)
if err != nil {
return nil, err
}
// Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
if route.IsExitRoute() {
return enableRoutes(
tx,
route.Node,
tsaddr.AllIPv4(),
tsaddr.AllIPv6(),
)
}
return enableRoutes(tx, route.Node, netip.Prefix(route.Prefix))
}
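A minimal sketch of enabling a stored route through the generic write helper used elsewhere in this file; the route ID is an illustrative value.
// Hedged sketch: enable route 42 (illustrative ID); EnableRoute itself expands
// exit routes to both 0.0.0.0/0 and ::/0.
update, err := Write(hsdb.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
	return EnableRoute(tx, 42)
})
if err != nil {
	return fmt.Errorf("enabling route: %w", err)
}
_ = update // assumption: the caller forwards the StateUpdate to its notifier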
func DisableRoute(tx *gorm.DB,
id uint64,
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
) ([]types.NodeID, error) {
route, err := GetRoute(tx, id)
if err != nil {
return nil, err
}
var routes types.Routes
node := route.Node
// Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
var update []types.NodeID
if !route.IsExitRoute() {
route.Enabled = false
err = tx.Save(route).Error
if err != nil {
return nil, err
}
update, err = failoverRouteTx(tx, isLikelyConnected, route)
if err != nil {
return nil, err
}
} else {
routes, err = GetNodeRoutes(tx, node)
if err != nil {
return nil, err
}
for i := range routes {
if routes[i].IsExitRoute() {
routes[i].Enabled = false
routes[i].IsPrimary = false
err = tx.Save(&routes[i]).Error
if err != nil {
return nil, err
}
}
}
}
// If update is empty, failover did not create one (no failover
// was necessary), so create one here and return it to the caller.
if update == nil {
update = []types.NodeID{node.ID}
}
return update, nil
}
func (hsdb *HSDatabase) DeleteRoute(
id uint64,
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
) ([]types.NodeID, error) {
return Write(hsdb.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
return DeleteRoute(tx, id, isLikelyConnected)
})
}
func DeleteRoute(
tx *gorm.DB,
id uint64,
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
) ([]types.NodeID, error) {
route, err := GetRoute(tx, id)
if err != nil {
return nil, err
}
if route.Node == nil {
// If the route is not assigned to a node, just delete it;
// there are no updates to send as no nodes depend on it.
if err := tx.Unscoped().Delete(&route).Error; err != nil {
return nil, err
}
return nil, nil
}
var routes types.Routes
node := route.Node
// Tailscale requires both IPv4 and IPv6 exit routes to
// be enabled at the same time, as per
// https://github.com/juanfont/headscale/issues/804#issuecomment-1399314002
// This means that if we delete a route which is an exit route, delete both.
var update []types.NodeID
if route.IsExitRoute() {
routes, err = GetNodeRoutes(tx, node)
if err != nil {
return nil, err
}
var routesToDelete types.Routes
for _, r := range routes {
if r.IsExitRoute() {
routesToDelete = append(routesToDelete, r)
}
}
if err := tx.Unscoped().Delete(&routesToDelete).Error; err != nil {
return nil, err
}
} else {
update, err = failoverRouteTx(tx, isLikelyConnected, route)
if err != nil {
return nil, nil
}
if err := tx.Unscoped().Delete(&route).Error; err != nil {
return nil, err
}
}
// If update is empty, failover did not create one (no failover
// was necessary), so create one here and return it to the caller.
if routes == nil {
routes, err = GetNodeRoutes(tx, node)
if err != nil {
return nil, err
}
}
node.Routes = routes
if update == nil {
update = []types.NodeID{node.ID}
}
return update, nil
}
func deleteNodeRoutes(tx *gorm.DB, node *types.Node, isLikelyConnected *xsync.MapOf[types.NodeID, bool]) ([]types.NodeID, error) {
routes, err := GetNodeRoutes(tx, node)
if err != nil {
return nil, fmt.Errorf("getting node routes: %w", err)
}
var changed []types.NodeID
for i := range routes {
if err := tx.Unscoped().Delete(&routes[i]).Error; err != nil {
return nil, fmt.Errorf("deleting route(%d): %w", &routes[i].ID, err)
}
// TODO(kradalby): This is a bit too aggressive, we could probably
// figure out which routes needs to be failed over rather than all.
chn, err := failoverRouteTx(tx, isLikelyConnected, &routes[i])
if err != nil {
return changed, fmt.Errorf("failing over route after delete: %w", err)
}
if chn != nil {
changed = append(changed, chn...)
}
}
return changed, nil
}
// isUniquePrefix reports whether no other node is already advertising and has enabled the same route.
func isUniquePrefix(tx *gorm.DB, route types.Route) bool {
var count int64
tx.Model(&types.Route{}).
Where("prefix = ? AND node_id != ? AND advertised = ? AND enabled = ?",
route.Prefix.String(),
route.NodeID,
true, true).Count(&count)
return count == 0
}
func getPrimaryRoute(tx *gorm.DB, prefix netip.Prefix) (*types.Route, error) {
var route types.Route
err := tx.
Preload("Node").
Where("prefix = ? AND advertised = ? AND enabled = ? AND is_primary = ?", prefix.String(), true, true, true).
First(&route).Error
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return nil, err
}
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, gorm.ErrRecordNotFound
}
return &route, nil
}
func (hsdb *HSDatabase) GetNodePrimaryRoutes(node *types.Node) (types.Routes, error) {
return Read(hsdb.DB, func(rx *gorm.DB) (types.Routes, error) {
return GetNodePrimaryRoutes(rx, node)
})
}
// GetNodePrimaryRoutes returns the routes that are enabled and marked as primary (for subnet failover).
// Exit routes are not considered, as they are never marked as primary.
func GetNodePrimaryRoutes(tx *gorm.DB, node *types.Node) (types.Routes, error) {
var routes types.Routes
err := tx.
Preload("Node").
Where("node_id = ? AND advertised = ? AND enabled = ? AND is_primary = ?", node.ID, true, true, true).
Find(&routes).Error
if err != nil {
return nil, err
}
return routes, nil
}
func (hsdb *HSDatabase) SaveNodeRoutes(node *types.Node) (bool, error) {
return Write(hsdb.DB, func(tx *gorm.DB) (bool, error) {
return SaveNodeRoutes(tx, node)
})
}
// SaveNodeRoutes takes a node and updates the database with
// the new routes.
// It returns a bool whether an update should be sent as the
// saved route impacts nodes.
func SaveNodeRoutes(tx *gorm.DB, node *types.Node) (bool, error) {
sendUpdate := false
currentRoutes := types.Routes{}
err := tx.Where("node_id = ?", node.ID).Find(&currentRoutes).Error
if err != nil {
return sendUpdate, err
}
advertisedRoutes := map[netip.Prefix]bool{}
for _, prefix := range node.Hostinfo.RoutableIPs {
advertisedRoutes[prefix] = false
}
log.Trace().
Str("node", node.Hostname).
Interface("advertisedRoutes", advertisedRoutes).
Interface("currentRoutes", currentRoutes).
Msg("updating routes")
for pos, route := range currentRoutes {
if _, ok := advertisedRoutes[netip.Prefix(route.Prefix)]; ok {
if !route.Advertised {
currentRoutes[pos].Advertised = true
err := tx.Save(&currentRoutes[pos]).Error
if err != nil {
return sendUpdate, err
}
// If a route that is newly "saved" is already
// enabled, set sendUpdate to true as it is now
// available.
if route.Enabled {
sendUpdate = true
}
}
advertisedRoutes[netip.Prefix(route.Prefix)] = true
} else if route.Advertised {
currentRoutes[pos].Advertised = false
currentRoutes[pos].Enabled = false
err := tx.Save(&currentRoutes[pos]).Error
if err != nil {
return sendUpdate, err
}
}
}
for prefix, exists := range advertisedRoutes {
if !exists {
route := types.Route{
NodeID: node.ID.Uint64(),
Prefix: prefix,
Advertised: true,
Enabled: false,
}
err := tx.Create(&route).Error
if err != nil {
return sendUpdate, err
}
}
}
return sendUpdate, nil
}
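A short usage sketch, assuming the caller has just received updated Hostinfo for a node; the notification step is only indicated as a comment.
// Hedged sketch: persist the node's currently advertised routes.
sendUpdate, err := hsdb.SaveNodeRoutes(node)
if err != nil {
	return fmt.Errorf("saving node routes: %w", err)
}
if sendUpdate {
	// An already-enabled route became available again, so peers should be
	// notified (assumption: the caller owns a notifier for StateUpdates).
}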
// FailoverNodeRoutesIfNecessary takes a node and checks if any of the node's
// routes need to be failed over to another host.
// If needed, the failover will be attempted.
func FailoverNodeRoutesIfNecessary(
tx *gorm.DB,
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
node *types.Node,
) (*types.StateUpdate, error) {
nodeRoutes, err := GetNodeRoutes(tx, node)
if err != nil {
return nil, nil
}
changedNodes := make(set.Set[types.NodeID])
nodeRouteLoop:
for _, nodeRoute := range nodeRoutes {
routes, err := getRoutesByPrefix(tx, netip.Prefix(nodeRoute.Prefix))
if err != nil {
return nil, fmt.Errorf("getting routes by prefix: %w", err)
}
for _, route := range routes {
if route.IsPrimary {
// if we have a primary route, and the node is connected
// nothing needs to be done.
if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val {
continue nodeRouteLoop
}
// if not, we need to failover the route
failover := failoverRoute(isLikelyConnected, &route, routes)
if failover != nil {
err := failover.save(tx)
if err != nil {
return nil, fmt.Errorf("saving failover routes: %w", err)
}
changedNodes.Add(failover.old.Node.ID)
changedNodes.Add(failover.new.Node.ID)
continue nodeRouteLoop
}
}
}
}
chng := changedNodes.Slice()
sort.SliceStable(chng, func(i, j int) bool {
return chng[i] < chng[j]
})
if len(changedNodes) != 0 {
return &types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: chng,
Message: "called from db.FailoverNodeRoutesIfNecessary",
}, nil
}
return nil, nil
}
// failoverRouteTx takes a route that is no longer available,
// this can be either from:
// - being disabled
// - being deleted
// - host going offline
//
// and tries to find a new route to take over its place.
// If the given route was not primary, it returns early.
func failoverRouteTx(
tx *gorm.DB,
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
r *types.Route,
) ([]types.NodeID, error) {
if r == nil {
return nil, nil
}
// This route is not a primary route, and it is not
// being served to nodes.
if !r.IsPrimary {
return nil, nil
}
// We do not have to failover exit nodes
if r.IsExitRoute() {
return nil, nil
}
routes, err := getRoutesByPrefix(tx, netip.Prefix(r.Prefix))
if err != nil {
return nil, fmt.Errorf("getting routes by prefix: %w", err)
}
fo := failoverRoute(isLikelyConnected, r, routes)
if fo == nil {
return nil, nil
}
err = fo.save(tx)
if err != nil {
return nil, fmt.Errorf("saving failover route: %w", err)
}
log.Trace().
Str("hostname", fo.new.Node.Hostname).
Msgf("set primary to new route, was: id(%d), host(%s), now: id(%d), host(%s)", fo.old.ID, fo.old.Node.Hostname, fo.new.ID, fo.new.Node.Hostname)
// Return a list of the machinekeys of the changed nodes.
return []types.NodeID{fo.old.Node.ID, fo.new.Node.ID}, nil
}
type failover struct {
old *types.Route
new *types.Route
}
func (f *failover) save(tx *gorm.DB) error {
err := tx.Save(f.old).Error
if err != nil {
return fmt.Errorf("saving old primary: %w", err)
}
err = tx.Save(f.new).Error
if err != nil {
return fmt.Errorf("saving new primary: %w", err)
}
return nil
}
func failoverRoute(
isLikelyConnected *xsync.MapOf[types.NodeID, bool],
routeToReplace *types.Route,
altRoutes types.Routes,
) *failover {
if routeToReplace == nil {
return nil
}
// This route is not a primary route, and it is not
// being served to nodes.
if !routeToReplace.IsPrimary {
return nil
}
// We do not have to failover exit nodes
if routeToReplace.IsExitRoute() {
return nil
}
var newPrimary *types.Route
// Find a new suitable route
for idx, route := range altRoutes {
if routeToReplace.ID == route.ID {
continue
}
if !route.Enabled {
continue
}
if isLikelyConnected != nil {
if val, ok := isLikelyConnected.Load(route.Node.ID); ok && val {
newPrimary = &altRoutes[idx]
break
}
}
}
// If a new route was not found/available,
// return without an error.
// We do not want to update the database as
// the one currently marked as primary is the
// best we got.
if newPrimary == nil {
return nil
}
routeToReplace.IsPrimary = false
newPrimary.IsPrimary = true
return &failover{
old: routeToReplace,
new: newPrimary,
}
}
func (hsdb *HSDatabase) EnableAutoApprovedRoutes(
polMan policy.PolicyManager,
node *types.Node,
) error {
return hsdb.Write(func(tx *gorm.DB) error {
return EnableAutoApprovedRoutes(tx, polMan, node)
})
}
// EnableAutoApprovedRoutes enables any routes advertised by a node that match the ACL autoApprovers policy.
func EnableAutoApprovedRoutes(
tx *gorm.DB,
polMan policy.PolicyManager,
node *types.Node,
) error {
if node.IPv4 == nil && node.IPv6 == nil {
return nil // This node has no IPAddresses, so can't possibly match any autoApprovers ACLs
}
routes, err := GetNodeAdvertisedRoutes(tx, node)
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
return fmt.Errorf("getting advertised routes for node(%s %d): %w", node.Hostname, node.ID, err)
}
log.Trace().Interface("routes", routes).Msg("routes for autoapproving")
var approvedRoutes types.Routes
for _, advertisedRoute := range routes {
if advertisedRoute.Enabled {
continue
}
routeApprovers := polMan.ApproversForRoute(netip.Prefix(advertisedRoute.Prefix))
log.Trace().
Str("node", node.Hostname).
Uint("user.id", node.User.ID).
Strs("routeApprovers", routeApprovers).
Str("prefix", netip.Prefix(advertisedRoute.Prefix).String()).
Msg("looking up route for autoapproving")
for _, approvedAlias := range routeApprovers {
if approvedAlias == node.User.Username() {
approvedRoutes = append(approvedRoutes, advertisedRoute)
} else {
// TODO(kradalby): figure out how to get this to depend on less stuff
approvedIps, err := polMan.ExpandAlias(approvedAlias)
if err != nil {
return fmt.Errorf("expanding alias %q for autoApprovers: %w", approvedAlias, err)
}
// approvedIps should contain all of the node's IPs if it matches the rule, so checking one is enough
if approvedIps.Contains(*node.IPv4) {
approvedRoutes = append(approvedRoutes, advertisedRoute)
}
}
}
}
for _, approvedRoute := range approvedRoutes {
_, err := EnableRoute(tx, uint64(approvedRoute.ID))
if err != nil {
return fmt.Errorf("enabling approved route(%d): %w", approvedRoute.ID, err)
}
}
return nil
}
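A hedged sketch of how auto-approval is typically driven, assuming NewPolicyManager keeps the signature used in the node tests above; the policy snippet and the users, nodes and node variables are illustrative.
// Policy that lets user "test" self-approve advertised routes within 10.42.0.0/16.
acl := []byte(`{
	"autoApprovers": {
		"routes": { "10.42.0.0/16": ["test"] }
	}
}`)
pm, err := policy.NewPolicyManager(acl, users, nodes)
if err != nil {
	return fmt.Errorf("building policy manager: %w", err)
}
// Enables any advertised-but-not-yet-enabled routes on the node that match the policy.
if err := hsdb.EnableAutoApprovedRoutes(pm, node); err != nil {
	return fmt.Errorf("auto-approving routes for %s: %w", node.Hostname, err)
}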

1233
hscontrol/db/routes_test.go Normal file

File diff suppressed because it is too large

View File

@@ -199,18 +199,19 @@ func ListNodesByUser(tx *gorm.DB, uid types.UserID) (types.Nodes, error) {
return nodes, nil
}
func (hsdb *HSDatabase) AssignNodeToUser(node *types.Node, uid types.UserID) error {
return hsdb.Write(func(tx *gorm.DB) error {
return AssignNodeToUser(tx, node, uid)
})
}
// AssignNodeToUser assigns a Node to a user.
func AssignNodeToUser(tx *gorm.DB, nodeID types.NodeID, uid types.UserID) error {
node, err := GetNodeByID(tx, nodeID)
if err != nil {
return err
}
func AssignNodeToUser(tx *gorm.DB, node *types.Node, uid types.UserID) error {
user, err := GetUserByID(tx, uid)
if err != nil {
return err
}
node.User = *user
node.UserID = user.ID
if result := tx.Save(&node); result.Error != nil {
return result.Error
}

View File

@@ -108,7 +108,7 @@ func (s *Suite) TestSetMachineUser(c *check.C) {
c.Assert(err, check.IsNil)
node := types.Node{
ID: 12,
ID: 0,
Hostname: "testnode",
UserID: oldUser.ID,
RegisterMethod: util.RegisterMethodAuthKey,
@@ -118,28 +118,16 @@ func (s *Suite) TestSetMachineUser(c *check.C) {
c.Assert(trx.Error, check.IsNil)
c.Assert(node.UserID, check.Equals, oldUser.ID)
err = db.Write(func(tx *gorm.DB) error {
return AssignNodeToUser(tx, 12, types.UserID(newUser.ID))
})
err = db.AssignNodeToUser(&node, types.UserID(newUser.ID))
c.Assert(err, check.IsNil)
// Reload node from database to see updated values
updatedNode, err := db.GetNodeByID(12)
c.Assert(err, check.IsNil)
c.Assert(updatedNode.UserID, check.Equals, newUser.ID)
c.Assert(updatedNode.User.Name, check.Equals, newUser.Name)
c.Assert(node.UserID, check.Equals, newUser.ID)
c.Assert(node.User.Name, check.Equals, newUser.Name)
err = db.Write(func(tx *gorm.DB) error {
return AssignNodeToUser(tx, 12, 9584849)
})
err = db.AssignNodeToUser(&node, 9584849)
c.Assert(err, check.Equals, ErrUserNotFound)
err = db.Write(func(tx *gorm.DB) error {
return AssignNodeToUser(tx, 12, types.UserID(newUser.ID))
})
err = db.AssignNodeToUser(&node, types.UserID(newUser.ID))
c.Assert(err, check.IsNil)
// Reload node from database again to see updated values
finalNode, err := db.GetNodeByID(12)
c.Assert(err, check.IsNil)
c.Assert(finalNode.UserID, check.Equals, newUser.ID)
c.Assert(finalNode.User.Name, check.Equals, newUser.Name)
c.Assert(node.UserID, check.Equals, newUser.ID)
c.Assert(node.User.Name, check.Equals, newUser.Name)
}

View File

@@ -1,144 +0,0 @@
package hscontrol
import (
"encoding/json"
"fmt"
"net/http"
"os"
"github.com/arl/statsviz"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/prometheus/client_golang/prometheus/promhttp"
"tailscale.com/tailcfg"
"tailscale.com/tsweb"
)
func (h *Headscale) debugHTTPServer() *http.Server {
debugMux := http.NewServeMux()
debug := tsweb.Debugger(debugMux)
debug.Handle("notifier", "Connected nodes in notifier", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(h.nodeNotifier.String()))
}))
debug.Handle("config", "Current configuration", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
config, err := json.MarshalIndent(h.cfg, "", " ")
if err != nil {
httpError(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(config)
}))
debug.Handle("policy", "Current policy", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch h.cfg.Policy.Mode {
case types.PolicyModeDB:
p, err := h.state.GetPolicy()
if err != nil {
httpError(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(p.Data))
case types.PolicyModeFile:
// Read the file directly for debug purposes
absPath := util.AbsolutePathFromConfigPath(h.cfg.Policy.Path)
pol, err := os.ReadFile(absPath)
if err != nil {
httpError(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(pol)
default:
httpError(w, fmt.Errorf("unsupported policy mode: %s", h.cfg.Policy.Mode))
}
}))
debug.Handle("filter", "Current filter", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
filter, _ := h.state.Filter()
filterJSON, err := json.MarshalIndent(filter, "", " ")
if err != nil {
httpError(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(filterJSON)
}))
debug.Handle("ssh", "SSH Policy per node", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
nodes, err := h.state.ListNodes()
if err != nil {
httpError(w, err)
return
}
sshPol := make(map[string]*tailcfg.SSHPolicy)
for _, node := range nodes {
pol, err := h.state.SSHPolicy(node)
if err != nil {
httpError(w, err)
return
}
sshPol[fmt.Sprintf("id:%d hostname:%s givenname:%s", node.ID, node.Hostname, node.GivenName)] = pol
}
sshJSON, err := json.MarshalIndent(sshPol, "", " ")
if err != nil {
httpError(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(sshJSON)
}))
debug.Handle("derpmap", "Current DERPMap", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
dm := h.state.DERPMap()
dmJSON, err := json.MarshalIndent(dm, "", " ")
if err != nil {
httpError(w, err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(dmJSON)
}))
debug.Handle("registration-cache", "Pending registrations", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// TODO(kradalby): This should be replaced with a proper state method that returns registration info
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte("{}")) // For now, return empty object
}))
debug.Handle("routes", "Routes", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(h.state.PrimaryRoutesString()))
}))
debug.Handle("policy-manager", "Policy Manager", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.WriteHeader(http.StatusOK)
w.Write([]byte(h.state.PolicyDebugString()))
}))
err := statsviz.Register(debugMux)
if err == nil {
debug.URL("/debug/statsviz", "Statsviz (visualise go metrics)")
}
debug.URL("/metrics", "Prometheus metrics")
debugMux.Handle("/metrics", promhttp.Handler())
debugHTTPServer := &http.Server{
Addr: h.cfg.MetricsAddr,
Handler: debugMux,
ReadTimeout: types.HTTPTimeout,
WriteTimeout: 0,
}
return debugHTTPServer
}
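For a quick look at what the debug server above exposes, a minimal sketch that polls a few of the registered routes. The listen address is an assumption (whatever metrics_listen_addr is configured to); the /debug/<name> paths follow from the tsweb Debugger handles registered above, and /metrics is mounted explicitly.

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Assumed address of the debug/metrics listener; adjust to your config.
    base := "http://127.0.0.1:9090"
    for _, path := range []string{"/debug/config", "/debug/filter", "/debug/derpmap", "/metrics"} {
        resp, err := http.Get(base + path)
        if err != nil {
            fmt.Printf("%s: request failed: %v\n", path, err)
            continue
        }
        body, _ := io.ReadAll(resp.Body)
        resp.Body.Close()
        fmt.Printf("%s -> %d (%d bytes)\n", path, resp.StatusCode, len(body))
    }
}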

View File

@@ -2,11 +2,9 @@ package server
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/netip"
@@ -30,10 +28,7 @@ import (
// server that the DERP HTTP client does not want the HTTP 101 response
// headers and it will begin writing & reading the DERP protocol immediately
// following its HTTP request.
const (
fastStartHeader = "Derp-Fast-Start"
DerpVerifyScheme = "headscale-derp-verify"
)
const fastStartHeader = "Derp-Fast-Start"
type DERPServer struct {
serverURL string
@@ -50,11 +45,6 @@ func NewDERPServer(
log.Trace().Caller().Msg("Creating new embedded DERP server")
server := derp.NewServer(derpKey, util.TSLogfWrapper()) // nolint // zerolinter complains
if cfg.ServerVerifyClients {
server.SetVerifyClientURL(DerpVerifyScheme + "://verify")
server.SetVerifyClientURLFailOpen(false)
}
return &DERPServer{
serverURL: serverURL,
key: derpKey,
@@ -370,29 +360,3 @@ func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
}
}
}
func NewDERPVerifyTransport(handleVerifyRequest func(*http.Request, io.Writer) error) *DERPVerifyTransport {
return &DERPVerifyTransport{
handleVerifyRequest: handleVerifyRequest,
}
}
type DERPVerifyTransport struct {
handleVerifyRequest func(*http.Request, io.Writer) error
}
func (t *DERPVerifyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
buf := new(bytes.Buffer)
if err := t.handleVerifyRequest(req, buf); err != nil {
log.Error().Caller().Err(err).Msg("Failed to handle client verify request: ")
return nil, err
}
resp := &http.Response{
StatusCode: http.StatusOK,
Body: io.NopCloser(buf),
}
return resp, nil
}

View File

@@ -1,14 +1,13 @@
package dns
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"os"
"sync"
"github.com/cenkalti/backoff/v5"
"github.com/cenkalti/backoff/v4"
"github.com/fsnotify/fsnotify"
"github.com/rs/zerolog/log"
"tailscale.com/tailcfg"
@@ -96,13 +95,13 @@ func (e *ExtraRecordsMan) Run() {
// If a file is removed or renamed, fsnotify will lose track of it
// and not watch it. We will therefore attempt to re-add it with a backoff.
case fsnotify.Remove, fsnotify.Rename:
_, err := backoff.Retry(context.Background(), func() (struct{}, error) {
err := backoff.Retry(func() error {
if _, err := os.Stat(e.path); err != nil {
return struct{}{}, err
return err
}
return struct{}{}, nil
}, backoff.WithBackOff(backoff.NewExponentialBackOff()))
return nil
}, backoff.NewExponentialBackOff())
if err != nil {
log.Error().Caller().Err(err).Msgf("extra records filewatcher retrying to find file after delete")
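Both call shapes of that retry appear in the hunk above: the backoff/v4 form takes an error-returning closure, while the v5 form is generic, takes a context, and returns a value alongside the error. A standalone sketch of the v4 pattern the watcher relies on; the file path is a made-up placeholder, not a headscale default.

package main

import (
    "fmt"
    "os"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    path := "/var/lib/headscale/extra-records.json" // placeholder path
    // Keep retrying os.Stat with exponential backoff until the file shows up
    // again (or the backoff gives up), mirroring the re-add logic above.
    err := backoff.Retry(func() error {
        if _, err := os.Stat(path); err != nil {
            return err // file still missing, retry
        }
        return nil
    }, backoff.NewExponentialBackOff())
    if err != nil {
        fmt.Println("gave up waiting for file:", err)
    }
}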

View File

@@ -6,26 +6,24 @@ import (
"errors"
"fmt"
"io"
"net/netip"
"os"
"slices"
"sort"
"strings"
"time"
"github.com/puzpuzpuz/xsync/v4"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog/log"
"github.com/samber/lo"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
)
@@ -51,15 +49,14 @@ func (api headscaleV1APIServer) CreateUser(
Email: request.GetEmail(),
ProfilePicURL: request.GetPictureUrl(),
}
user, policyChanged, err := api.h.state.CreateUser(newUser)
user, err := api.h.db.CreateUser(newUser)
if err != nil {
return nil, status.Errorf(codes.Internal, "failed to create user: %s", err)
return nil, err
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-user-created", user.Name)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
err = usersChangedHook(api.h.db, api.h.polMan, api.h.nodeNotifier)
if err != nil {
return nil, fmt.Errorf("updating resources using user: %w", err)
}
return &v1.CreateUserResponse{User: user.Proto()}, nil
@@ -69,23 +66,17 @@ func (api headscaleV1APIServer) RenameUser(
ctx context.Context,
request *v1.RenameUserRequest,
) (*v1.RenameUserResponse, error) {
oldUser, err := api.h.state.GetUserByID(types.UserID(request.GetOldId()))
oldUser, err := api.h.db.GetUserByID(types.UserID(request.GetOldId()))
if err != nil {
return nil, err
}
_, policyChanged, err := api.h.state.RenameUser(types.UserID(oldUser.ID), request.GetNewName())
err = api.h.db.RenameUser(types.UserID(oldUser.ID), request.GetNewName())
if err != nil {
return nil, err
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-user-renamed", request.GetNewName())
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
newUser, err := api.h.state.GetUserByName(request.GetNewName())
newUser, err := api.h.db.GetUserByName(request.GetNewName())
if err != nil {
return nil, err
}
@@ -97,16 +88,21 @@ func (api headscaleV1APIServer) DeleteUser(
ctx context.Context,
request *v1.DeleteUserRequest,
) (*v1.DeleteUserResponse, error) {
user, err := api.h.state.GetUserByID(types.UserID(request.GetId()))
user, err := api.h.db.GetUserByID(types.UserID(request.GetId()))
if err != nil {
return nil, err
}
err = api.h.state.DeleteUser(types.UserID(user.ID))
err = api.h.db.DestroyUser(types.UserID(user.ID))
if err != nil {
return nil, err
}
err = usersChangedHook(api.h.db, api.h.polMan, api.h.nodeNotifier)
if err != nil {
return nil, fmt.Errorf("updating resources using user: %w", err)
}
return &v1.DeleteUserResponse{}, nil
}
@@ -119,13 +115,13 @@ func (api headscaleV1APIServer) ListUsers(
switch {
case request.GetName() != "":
users, err = api.h.state.ListUsersWithFilter(&types.User{Name: request.GetName()})
users, err = api.h.db.ListUsers(&types.User{Name: request.GetName()})
case request.GetEmail() != "":
users, err = api.h.state.ListUsersWithFilter(&types.User{Email: request.GetEmail()})
users, err = api.h.db.ListUsers(&types.User{Email: request.GetEmail()})
case request.GetId() != 0:
users, err = api.h.state.ListUsersWithFilter(&types.User{Model: gorm.Model{ID: uint(request.GetId())}})
users, err = api.h.db.ListUsers(&types.User{Model: gorm.Model{ID: uint(request.GetId())}})
default:
users, err = api.h.state.ListAllUsers()
users, err = api.h.db.ListUsers()
}
if err != nil {
return nil, err
@@ -161,12 +157,12 @@ func (api headscaleV1APIServer) CreatePreAuthKey(
}
}
user, err := api.h.state.GetUserByID(types.UserID(request.GetUser()))
user, err := api.h.db.GetUserByName(request.GetUser())
if err != nil {
return nil, err
}
preAuthKey, err := api.h.state.CreatePreAuthKey(
preAuthKey, err := api.h.db.CreatePreAuthKey(
types.UserID(user.ID),
request.GetReusable(),
request.GetEphemeral(),
@@ -184,16 +180,18 @@ func (api headscaleV1APIServer) ExpirePreAuthKey(
ctx context.Context,
request *v1.ExpirePreAuthKeyRequest,
) (*v1.ExpirePreAuthKeyResponse, error) {
preAuthKey, err := api.h.state.GetPreAuthKey(request.Key)
if err != nil {
return nil, err
}
err := api.h.db.Write(func(tx *gorm.DB) error {
preAuthKey, err := db.GetPreAuthKey(tx, request.Key)
if err != nil {
return err
}
if uint64(preAuthKey.User.ID) != request.GetUser() {
return nil, fmt.Errorf("preauth key does not belong to user")
}
if preAuthKey.User.Name != request.GetUser() {
return fmt.Errorf("preauth key does not belong to user")
}
err = api.h.state.ExpirePreAuthKey(preAuthKey)
return db.ExpirePreAuthKey(tx, preAuthKey)
})
if err != nil {
return nil, err
}
@@ -205,12 +203,12 @@ func (api headscaleV1APIServer) ListPreAuthKeys(
ctx context.Context,
request *v1.ListPreAuthKeysRequest,
) (*v1.ListPreAuthKeysResponse, error) {
user, err := api.h.state.GetUserByID(types.UserID(request.GetUser()))
user, err := api.h.db.GetUserByName(request.GetUser())
if err != nil {
return nil, err
}
preAuthKeys, err := api.h.state.ListPreAuthKeys(types.UserID(user.ID))
preAuthKeys, err := api.h.db.ListPreAuthKeys(types.UserID(user.ID))
if err != nil {
return nil, err
}
@@ -241,47 +239,37 @@ func (api headscaleV1APIServer) RegisterNode(
return nil, err
}
user, err := api.h.state.GetUserByName(request.GetUser())
ipv4, ipv6, err := api.h.ipAlloc.Next()
if err != nil {
return nil, err
}
user, err := api.h.db.GetUserByName(request.GetUser())
if err != nil {
return nil, fmt.Errorf("looking up user: %w", err)
}
node, _, err := api.h.state.HandleNodeFromAuthPath(
node, _, err := api.h.db.HandleNodeFromAuthPath(
registrationId,
types.UserID(user.ID),
nil,
util.RegisterMethodCLI,
ipv4, ipv6,
)
if err != nil {
return nil, err
}
// This is a bit of a back and forth, but we have a chicken-and-egg
// dependency here.
// Because of the way the policy manager works, we need to have the node
// in the database, then add it to the policy manager and then we can
// approve the route. This means we get this dance where the node is
// first added to the database, then we add it to the policy manager via
// SaveNode (which automatically updates the policy manager) and then we can auto approve the routes.
// As that only approves the struct object, we need to save it again and
// ensure we send an update.
// This works, but might be another good candidate for doing some sort of
// eventbus.
routesChanged := api.h.state.AutoApproveRoutes(node)
_, policyChanged, err := api.h.state.SaveNode(node)
updateSent, err := nodesChangedHook(api.h.db, api.h.polMan, api.h.nodeNotifier)
if err != nil {
return nil, fmt.Errorf("saving auto approved routes to node: %w", err)
return nil, fmt.Errorf("updating resources using node: %w", err)
}
// Send policy update notifications if needed (from SaveNode or route changes)
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-nodes-change", "all")
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
if routesChanged {
if !updateSent {
ctx = types.NotifyCtx(context.Background(), "web-node-login", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdatePeerChanged(node.ID))
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: []types.NodeID{node.ID},
})
}
return &v1.RegisterNodeResponse{Node: node.Proto()}, nil
@@ -291,7 +279,7 @@ func (api headscaleV1APIServer) GetNode(
ctx context.Context,
request *v1.GetNodeRequest,
) (*v1.GetNodeResponse, error) {
node, err := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
if err != nil {
return nil, err
}
@@ -316,21 +304,26 @@ func (api headscaleV1APIServer) SetTags(
}
}
node, policyChanged, err := api.h.state.SetNodeTags(types.NodeID(request.GetNodeId()), request.GetTags())
node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
err := db.SetTags(tx, types.NodeID(request.GetNodeId()), request.GetTags())
if err != nil {
return nil, err
}
return db.GetNodeByID(tx, types.NodeID(request.GetNodeId()))
})
if err != nil {
return &v1.SetTagsResponse{
Node: nil,
}, status.Error(codes.InvalidArgument, err.Error())
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-node-tags", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
ctx = types.NotifyCtx(ctx, "cli-settags", node.Hostname)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdatePeerChanged(node.ID), node.ID)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: []types.NodeID{node.ID},
Message: "called from api.SetTags",
}, node.ID)
log.Trace().
Str("node", node.Hostname).
@@ -340,53 +333,6 @@ func (api headscaleV1APIServer) SetTags(
return &v1.SetTagsResponse{Node: node.Proto()}, nil
}
func (api headscaleV1APIServer) SetApprovedRoutes(
ctx context.Context,
request *v1.SetApprovedRoutesRequest,
) (*v1.SetApprovedRoutesResponse, error) {
var routes []netip.Prefix
for _, route := range request.GetRoutes() {
prefix, err := netip.ParsePrefix(route)
if err != nil {
return nil, fmt.Errorf("parsing route: %w", err)
}
// If the prefix is an exit route, add both. The client expects both
// to annotate the node as an exit node.
if prefix == tsaddr.AllIPv4() || prefix == tsaddr.AllIPv6() {
routes = append(routes, tsaddr.AllIPv4(), tsaddr.AllIPv6())
} else {
routes = append(routes, prefix)
}
}
tsaddr.SortPrefixes(routes)
routes = slices.Compact(routes)
node, policyChanged, err := api.h.state.SetApprovedRoutes(types.NodeID(request.GetNodeId()), routes)
if err != nil {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-routes-approved", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
if api.h.state.SetNodeRoutes(node.ID, node.SubnetRoutes()...) {
ctx := types.NotifyCtx(ctx, "poll-primary-change", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
} else {
ctx = types.NotifyCtx(ctx, "cli-approveroutes", node.Hostname)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdatePeerChanged(node.ID), node.ID)
}
proto := node.Proto()
proto.SubnetRoutes = util.PrefixesToString(api.h.state.GetNodePrimaryRoutes(node.ID))
return &v1.SetApprovedRoutesResponse{Node: proto}, nil
}
func validateTag(tag string) error {
if strings.Index(tag, "tag:") != 0 {
return errors.New("tag must start with the string 'tag:'")
@@ -404,24 +350,31 @@ func (api headscaleV1APIServer) DeleteNode(
ctx context.Context,
request *v1.DeleteNodeRequest,
) (*v1.DeleteNodeResponse, error) {
node, err := api.h.state.GetNodeByID(types.NodeID(request.GetNodeId()))
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
if err != nil {
return nil, err
}
policyChanged, err := api.h.state.DeleteNode(node)
changedNodes, err := api.h.db.DeleteNode(
node,
api.h.nodeNotifier.LikelyConnectedMap(),
)
if err != nil {
return nil, err
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-node-deleted", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
ctx = types.NotifyCtx(ctx, "cli-deletenode", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdatePeerRemoved(node.ID))
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerRemoved,
Removed: []types.NodeID{node.ID},
})
if changedNodes != nil {
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: changedNodes,
})
}
return &v1.DeleteNodeResponse{}, nil
}
@@ -432,25 +385,30 @@ func (api headscaleV1APIServer) ExpireNode(
) (*v1.ExpireNodeResponse, error) {
now := time.Now()
node, policyChanged, err := api.h.state.SetNodeExpiry(types.NodeID(request.GetNodeId()), now)
node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
db.NodeSetExpiry(
tx,
types.NodeID(request.GetNodeId()),
now,
)
return db.GetNodeByID(tx, types.NodeID(request.GetNodeId()))
})
if err != nil {
return nil, err
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-node-expired", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
ctx = types.NotifyCtx(ctx, "cli-expirenode-self", node.Hostname)
api.h.nodeNotifier.NotifyByNodeID(
ctx,
types.UpdateSelf(node.ID),
types.StateUpdate{
Type: types.StateSelfUpdate,
ChangeNodes: []types.NodeID{node.ID},
},
node.ID)
ctx = types.NotifyCtx(ctx, "cli-expirenode-peers", node.Hostname)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdateExpire(node.ID, now), node.ID)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdateExpire(node.ID, now), node.ID)
log.Trace().
Str("node", node.Hostname).
@@ -464,19 +422,28 @@ func (api headscaleV1APIServer) RenameNode(
ctx context.Context,
request *v1.RenameNodeRequest,
) (*v1.RenameNodeResponse, error) {
node, policyChanged, err := api.h.state.RenameNode(types.NodeID(request.GetNodeId()), request.GetNewName())
node, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.Node, error) {
err := db.RenameNode(
tx,
types.NodeID(request.GetNodeId()),
request.GetNewName(),
)
if err != nil {
return nil, err
}
return db.GetNodeByID(tx, types.NodeID(request.GetNodeId()))
})
if err != nil {
return nil, err
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-node-renamed", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
}
ctx = types.NotifyCtx(ctx, "cli-renamenode", node.Hostname)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdatePeerChanged(node.ID), node.ID)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: []types.NodeID{node.ID},
Message: "called from api.RenameNode",
}, node.ID)
log.Trace().
Str("node", node.Hostname).
@@ -497,21 +464,23 @@ func (api headscaleV1APIServer) ListNodes(
isLikelyConnected := api.h.nodeNotifier.LikelyConnectedMap()
if request.GetUser() != "" {
user, err := api.h.state.GetUserByName(request.GetUser())
user, err := api.h.db.GetUserByName(request.GetUser())
if err != nil {
return nil, err
}
nodes, err := api.h.state.ListNodesByUser(types.UserID(user.ID))
nodes, err := db.Read(api.h.db.DB, func(rx *gorm.DB) (types.Nodes, error) {
return db.ListNodesByUser(rx, types.UserID(user.ID))
})
if err != nil {
return nil, err
}
response := nodesToProto(api.h.state, isLikelyConnected, nodes)
response := nodesToProto(api.h.polMan, isLikelyConnected, nodes)
return &v1.ListNodesResponse{Nodes: response}, nil
}
nodes, err := api.h.state.ListNodes()
nodes, err := api.h.db.ListNodes()
if err != nil {
return nil, err
}
@@ -520,11 +489,11 @@ func (api headscaleV1APIServer) ListNodes(
return nodes[i].ID < nodes[j].ID
})
response := nodesToProto(api.h.state, isLikelyConnected, nodes)
response := nodesToProto(api.h.polMan, isLikelyConnected, nodes)
return &v1.ListNodesResponse{Nodes: response}, nil
}
func nodesToProto(state *state.State, isLikelyConnected *xsync.MapOf[types.NodeID, bool], nodes types.Nodes) []*v1.Node {
func nodesToProto(polMan policy.PolicyManager, isLikelyConnected *xsync.MapOf[types.NodeID, bool], nodes types.Nodes) []*v1.Node {
response := make([]*v1.Node, len(nodes))
for index, node := range nodes {
resp := node.Proto()
@@ -535,14 +504,8 @@ func nodesToProto(state *state.State, isLikelyConnected *xsync.MapOf[types.NodeI
resp.Online = true
}
var tags []string
for _, tag := range node.RequestTags() {
if state.NodeCanHaveTag(node, tag) {
tags = append(tags, tag)
}
}
tags := polMan.Tags(node)
resp.ValidTags = lo.Uniq(append(tags, node.ForcedTags...))
resp.SubnetRoutes = util.PrefixesToString(append(state.GetNodePrimaryRoutes(node.ID), node.ExitRoutes()...))
response[index] = resp
}
@@ -553,24 +516,21 @@ func (api headscaleV1APIServer) MoveNode(
ctx context.Context,
request *v1.MoveNodeRequest,
) (*v1.MoveNodeResponse, error) {
node, policyChanged, err := api.h.state.AssignNodeToUser(types.NodeID(request.GetNodeId()), types.UserID(request.GetUser()))
// TODO(kradalby): This should be done in one tx.
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
if err != nil {
return nil, err
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "grpc-node-moved", node.Hostname)
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
user, err := api.h.db.GetUserByName(request.GetUser())
if err != nil {
return nil, err
}
ctx = types.NotifyCtx(ctx, "cli-movenode-self", node.Hostname)
api.h.nodeNotifier.NotifyByNodeID(
ctx,
types.UpdateSelf(node.ID),
node.ID)
ctx = types.NotifyCtx(ctx, "cli-movenode", node.Hostname)
api.h.nodeNotifier.NotifyWithIgnore(ctx, types.UpdatePeerChanged(node.ID), node.ID)
err = api.h.db.AssignNodeToUser(node, types.UserID(user.ID))
if err != nil {
return nil, err
}
return &v1.MoveNodeResponse{Node: node.Proto()}, nil
}
@@ -585,7 +545,7 @@ func (api headscaleV1APIServer) BackfillNodeIPs(
return nil, errors.New("not confirmed, aborting")
}
changes, err := api.h.state.BackfillNodeIPs()
changes, err := api.h.db.BackfillNodeIPs(api.h.ipAlloc)
if err != nil {
return nil, err
}
@@ -593,6 +553,106 @@ func (api headscaleV1APIServer) BackfillNodeIPs(
return &v1.BackfillNodeIPsResponse{Changes: changes}, nil
}
func (api headscaleV1APIServer) GetRoutes(
ctx context.Context,
request *v1.GetRoutesRequest,
) (*v1.GetRoutesResponse, error) {
routes, err := db.Read(api.h.db.DB, func(rx *gorm.DB) (types.Routes, error) {
return db.GetRoutes(rx)
})
if err != nil {
return nil, err
}
return &v1.GetRoutesResponse{
Routes: types.Routes(routes).Proto(),
}, nil
}
func (api headscaleV1APIServer) EnableRoute(
ctx context.Context,
request *v1.EnableRouteRequest,
) (*v1.EnableRouteResponse, error) {
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) (*types.StateUpdate, error) {
return db.EnableRoute(tx, request.GetRouteId())
})
if err != nil {
return nil, err
}
if update != nil {
ctx := types.NotifyCtx(ctx, "cli-enableroute", "unknown")
api.h.nodeNotifier.NotifyAll(
ctx, *update)
}
return &v1.EnableRouteResponse{}, nil
}
func (api headscaleV1APIServer) DisableRoute(
ctx context.Context,
request *v1.DisableRouteRequest,
) (*v1.DisableRouteResponse, error) {
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
return db.DisableRoute(tx, request.GetRouteId(), api.h.nodeNotifier.LikelyConnectedMap())
})
if err != nil {
return nil, err
}
if update != nil {
ctx := types.NotifyCtx(ctx, "cli-disableroute", "unknown")
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: update,
})
}
return &v1.DisableRouteResponse{}, nil
}
func (api headscaleV1APIServer) GetNodeRoutes(
ctx context.Context,
request *v1.GetNodeRoutesRequest,
) (*v1.GetNodeRoutesResponse, error) {
node, err := api.h.db.GetNodeByID(types.NodeID(request.GetNodeId()))
if err != nil {
return nil, err
}
routes, err := api.h.db.GetNodeRoutes(node)
if err != nil {
return nil, err
}
return &v1.GetNodeRoutesResponse{
Routes: types.Routes(routes).Proto(),
}, nil
}
func (api headscaleV1APIServer) DeleteRoute(
ctx context.Context,
request *v1.DeleteRouteRequest,
) (*v1.DeleteRouteResponse, error) {
isConnected := api.h.nodeNotifier.LikelyConnectedMap()
update, err := db.Write(api.h.db.DB, func(tx *gorm.DB) ([]types.NodeID, error) {
return db.DeleteRoute(tx, request.GetRouteId(), isConnected)
})
if err != nil {
return nil, err
}
if update != nil {
ctx := types.NotifyCtx(ctx, "cli-deleteroute", "unknown")
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: update,
})
}
return &v1.DeleteRouteResponse{}, nil
}
func (api headscaleV1APIServer) CreateApiKey(
ctx context.Context,
request *v1.CreateApiKeyRequest,
@@ -602,7 +662,9 @@ func (api headscaleV1APIServer) CreateApiKey(
expiration = request.GetExpiration().AsTime()
}
apiKey, _, err := api.h.state.CreateAPIKey(&expiration)
apiKey, _, err := api.h.db.CreateAPIKey(
&expiration,
)
if err != nil {
return nil, err
}
@@ -617,12 +679,12 @@ func (api headscaleV1APIServer) ExpireApiKey(
var apiKey *types.APIKey
var err error
apiKey, err = api.h.state.GetAPIKey(request.Prefix)
apiKey, err = api.h.db.GetAPIKey(request.Prefix)
if err != nil {
return nil, err
}
err = api.h.state.ExpireAPIKey(apiKey)
err = api.h.db.ExpireAPIKey(apiKey)
if err != nil {
return nil, err
}
@@ -634,7 +696,7 @@ func (api headscaleV1APIServer) ListApiKeys(
ctx context.Context,
request *v1.ListApiKeysRequest,
) (*v1.ListApiKeysResponse, error) {
apiKeys, err := api.h.state.ListAPIKeys()
apiKeys, err := api.h.db.ListAPIKeys()
if err != nil {
return nil, err
}
@@ -660,12 +722,12 @@ func (api headscaleV1APIServer) DeleteApiKey(
err error
)
apiKey, err = api.h.state.GetAPIKey(request.Prefix)
apiKey, err = api.h.db.GetAPIKey(request.Prefix)
if err != nil {
return nil, err
}
if err := api.h.state.DestroyAPIKey(*apiKey); err != nil {
if err := api.h.db.DestroyAPIKey(*apiKey); err != nil {
return nil, err
}
@@ -678,7 +740,7 @@ func (api headscaleV1APIServer) GetPolicy(
) (*v1.GetPolicyResponse, error) {
switch api.h.cfg.Policy.Mode {
case types.PolicyModeDB:
p, err := api.h.state.GetPolicy()
p, err := api.h.db.GetPolicy()
if err != nil {
return nil, fmt.Errorf("loading ACL from database: %w", err)
}
@@ -723,36 +785,33 @@ func (api headscaleV1APIServer) SetPolicy(
// a scenario where they might be allowed if the server has no nodes
// yet, but it should help for the general case and for hot reloading
// configurations.
nodes, err := api.h.state.ListNodes()
nodes, err := api.h.db.ListNodes()
if err != nil {
return nil, fmt.Errorf("loading nodes from database to validate policy: %w", err)
}
changed, err := api.h.state.SetPolicy([]byte(p))
changed, err := api.h.polMan.SetPolicy([]byte(p))
if err != nil {
return nil, fmt.Errorf("setting policy: %w", err)
}
if len(nodes) > 0 {
_, err = api.h.state.SSHPolicy(nodes[0])
_, err = api.h.polMan.SSHPolicy(nodes[0])
if err != nil {
return nil, fmt.Errorf("verifying SSH rules: %w", err)
}
}
updated, err := api.h.state.SetPolicyInDB(p)
updated, err := api.h.db.SetPolicy(p)
if err != nil {
return nil, err
}
// Only send update if the packet filter has changed.
if changed {
err = api.h.state.AutoApproveNodes()
if err != nil {
return nil, err
}
ctx := types.NotifyCtx(context.Background(), "acl-update", "na")
api.h.nodeNotifier.NotifyAll(ctx, types.UpdateFull())
api.h.nodeNotifier.NotifyAll(ctx, types.StateUpdate{
Type: types.StateFullUpdate,
})
}
response := &v1.SetPolicyResponse{
@@ -768,7 +827,7 @@ func (api headscaleV1APIServer) DebugCreateNode(
ctx context.Context,
request *v1.DebugCreateNodeRequest,
) (*v1.DebugCreateNodeResponse, error) {
user, err := api.h.state.GetUserByName(request.GetUser())
user, err := api.h.db.GetUserByName(request.GetUser())
if err != nil {
return nil, err
}
@@ -807,14 +866,17 @@ func (api headscaleV1APIServer) DebugCreateNode(
Hostinfo: &hostinfo,
},
Registered: make(chan *types.Node),
Registered: make(chan struct{}),
}
log.Debug().
Str("registration_id", registrationId.String()).
Msg("adding debug machine via CLI, appending to registration cache")
api.h.state.SetRegistrationCacheEntry(registrationId, newNode)
api.h.registrationCache.Set(
registrationId,
newNode,
)
return &v1.DebugCreateNodeResponse{Node: newNode.Node.Proto()}, nil
}
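One detail in the SetApprovedRoutes hunk above is easy to miss: approving either default route implicitly approves both, because clients expect the IPv4/IPv6 pair before they mark a node as an exit node. The same logic, condensed into a standalone helper as a sketch; the helper name and the main wrapper are ours, the expansion itself is taken from the diff.

package main

import (
    "fmt"
    "net/netip"
    "slices"

    "tailscale.com/net/tsaddr"
)

// expandApprovedRoutes parses the requested routes, widens a single exit
// route (0.0.0.0/0 or ::/0) to the full pair, then sorts and de-duplicates.
func expandApprovedRoutes(in []string) ([]netip.Prefix, error) {
    var routes []netip.Prefix
    for _, r := range in {
        prefix, err := netip.ParsePrefix(r)
        if err != nil {
            return nil, fmt.Errorf("parsing route: %w", err)
        }
        if prefix == tsaddr.AllIPv4() || prefix == tsaddr.AllIPv6() {
            routes = append(routes, tsaddr.AllIPv4(), tsaddr.AllIPv6())
        } else {
            routes = append(routes, prefix)
        }
    }
    tsaddr.SortPrefixes(routes)
    return slices.Compact(routes), nil
}

func main() {
    routes, _ := expandApprovedRoutes([]string{"0.0.0.0/0", "10.0.0.0/24"})
    fmt.Println(routes) // both exit routes and the subnet route, de-duplicated
}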

View File

@@ -81,33 +81,28 @@ func parseCabailityVersion(req *http.Request) (tailcfg.CapabilityVersion, error)
return tailcfg.CapabilityVersion(clientCapabilityVersion), nil
}
func (h *Headscale) handleVerifyRequest(
func (h *Headscale) derpRequestIsAllowed(
req *http.Request,
writer io.Writer,
) error {
) (bool, error) {
body, err := io.ReadAll(req.Body)
if err != nil {
return fmt.Errorf("cannot read request body: %w", err)
return false, fmt.Errorf("cannot read request body: %w", err)
}
var derpAdmitClientRequest tailcfg.DERPAdmitClientRequest
if err := json.Unmarshal(body, &derpAdmitClientRequest); err != nil {
return fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err)
return false, fmt.Errorf("cannot parse derpAdmitClientRequest: %w", err)
}
nodes, err := h.state.ListNodes()
nodes, err := h.db.ListNodes()
if err != nil {
return fmt.Errorf("cannot list nodes: %w", err)
return false, fmt.Errorf("cannot list nodes: %w", err)
}
resp := &tailcfg.DERPAdmitClientResponse{
Allow: nodes.ContainsNodeKey(derpAdmitClientRequest.NodePublic),
}
return json.NewEncoder(writer).Encode(resp)
return nodes.ContainsNodeKey(derpAdmitClientRequest.NodePublic), nil
}
// VerifyHandler see https://github.com/tailscale/tailscale/blob/964282d34f06ecc06ce644769c66b0b31d118340/derp/derp_server.go#L1159
// DERP uses verifyClientsURL to verify whether a client is allowed to connect to the DERP server.
// See https://github.com/tailscale/tailscale/blob/964282d34f06ecc06ce644769c66b0b31d118340/derp/derp_server.go#L1159, DERP uses verifyClientsURL to verify whether a client is allowed to connect to the DERP server.
func (h *Headscale) VerifyHandler(
writer http.ResponseWriter,
req *http.Request,
@@ -117,12 +112,18 @@ func (h *Headscale) VerifyHandler(
return
}
err := h.handleVerifyRequest(req, writer)
allow, err := h.derpRequestIsAllowed(req)
if err != nil {
httpError(writer, err)
return
}
resp := tailcfg.DERPAdmitClientResponse{
Allow: allow,
}
writer.Header().Set("Content-Type", "application/json")
json.NewEncoder(writer).Encode(resp)
}
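For reference, the same exchange seen from the DERP server's side: it POSTs a DERPAdmitClientRequest to its verify-clients URL and acts on the Allow field of the response. A minimal sketch; the URL, source address and the zero node key are placeholders.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
    "net/netip"

    "tailscale.com/tailcfg"
    "tailscale.com/types/key"
)

func main() {
    var nodeKey key.NodePublic // a real DERP server sends the connecting client's key
    admitReq := tailcfg.DERPAdmitClientRequest{
        NodePublic: nodeKey,
        Source:     netip.MustParseAddr("192.0.2.10"),
    }
    body, _ := json.Marshal(admitReq)

    // Placeholder URL; point it at the handler registered above.
    resp, err := http.Post("https://headscale.example.com/verify", "application/json", bytes.NewReader(body))
    if err != nil {
        fmt.Println("verify request failed:", err)
        return
    }
    defer resp.Body.Close()

    var admitResp tailcfg.DERPAdmitClientResponse
    if err := json.NewDecoder(resp.Body).Decode(&admitResp); err != nil {
        fmt.Println("decoding response failed:", err)
        return
    }
    fmt.Println("client allowed:", admitResp.Allow)
}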
// KeyHandler provides the Headscale pub key
@@ -171,7 +172,7 @@ func (h *Headscale) HealthHandler(
json.NewEncoder(writer).Encode(res)
}
if err := h.state.PingDB(req.Context()); err != nil {
if err := h.db.PingDB(req.Context()); err != nil {
respond(err)
return

View File

@@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io/fs"
"net/netip"
"net/url"
"os"
"path"
@@ -16,9 +15,9 @@ import (
"sync/atomic"
"time"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/klauspost/compress/zstd"
@@ -51,9 +50,12 @@ var debugDumpMapResponsePath = envknob.String("HEADSCALE_DEBUG_DUMP_MAPRESPONSE_
type Mapper struct {
// Configuration
state *state.State
cfg *types.Config
notif *notifier.Notifier
// TODO(kradalby): figure out if this is the format we want this in
db *db.HSDatabase
cfg *types.Config
derpMap *tailcfg.DERPMap
notif *notifier.Notifier
polMan policy.PolicyManager
uid string
created time.Time
@@ -66,16 +68,20 @@ type patch struct {
}
func NewMapper(
state *state.State,
db *db.HSDatabase,
cfg *types.Config,
derpMap *tailcfg.DERPMap,
notif *notifier.Notifier,
polMan policy.PolicyManager,
) *Mapper {
uid, _ := util.GenerateRandomStringDNSSafe(mapperIDLength)
return &Mapper{
state: state,
cfg: cfg,
notif: notif,
db: db,
cfg: cfg,
derpMap: derpMap,
notif: notif,
polMan: polMan,
uid: uid,
created: time.Now(),
@@ -91,22 +97,15 @@ func generateUserProfiles(
node *types.Node,
peers types.Nodes,
) []tailcfg.UserProfile {
userMap := make(map[uint]*types.User)
ids := make([]uint, 0, len(userMap))
userMap[node.User.ID] = &node.User
ids = append(ids, node.User.ID)
userMap := make(map[uint]types.User)
userMap[node.User.ID] = node.User
for _, peer := range peers {
userMap[peer.User.ID] = &peer.User
ids = append(ids, peer.User.ID)
userMap[peer.User.ID] = peer.User // not worth checking if it is already there
}
slices.Sort(ids)
ids = slices.Compact(ids)
var profiles []tailcfg.UserProfile
for _, id := range ids {
if userMap[id] != nil {
profiles = append(profiles, userMap[id].TailscaleUserProfile())
}
for _, user := range userMap {
profiles = append(profiles, user.TailscaleUserProfile())
}
return profiles
@@ -166,7 +165,7 @@ func (m *Mapper) fullMapResponse(
err = appendPeerChanges(
resp,
true, // full change
m.state,
m.polMan,
node,
capVer,
peers,
@@ -229,6 +228,8 @@ func (m *Mapper) DERPMapResponse(
node *types.Node,
derpMap *tailcfg.DERPMap,
) ([]byte, error) {
m.derpMap = derpMap
resp := m.baseMapResponse()
resp.DERPMap = derpMap
@@ -242,32 +243,34 @@ func (m *Mapper) PeerChangedResponse(
patches []*tailcfg.PeerChange,
messages ...string,
) ([]byte, error) {
var err error
resp := m.baseMapResponse()
peers, err := m.ListPeers(node.ID)
if err != nil {
return nil, err
}
var removedIDs []tailcfg.NodeID
var changedIDs []types.NodeID
for nodeID, nodeChanged := range changed {
if nodeChanged {
if nodeID != node.ID {
changedIDs = append(changedIDs, nodeID)
}
changedIDs = append(changedIDs, nodeID)
} else {
removedIDs = append(removedIDs, nodeID.NodeID())
}
}
changedNodes := types.Nodes{}
if len(changedIDs) > 0 {
changedNodes, err = m.ListNodes(changedIDs...)
if err != nil {
return nil, err
changedNodes := make(types.Nodes, 0, len(changedIDs))
for _, peer := range peers {
if slices.Contains(changedIDs, peer.ID) {
changedNodes = append(changedNodes, peer)
}
}
err = appendPeerChanges(
&resp,
false, // partial change
m.state,
m.polMan,
node,
mapRequest.Version,
changedNodes,
@@ -294,15 +297,9 @@ func (m *Mapper) PeerChangedResponse(
resp.PeersChangedPatch = patches
}
_, matchers := m.state.Filter()
// Add the node itself; it might have changed, and particularly
// if there are no patches or changes, this is a self-update.
tailnode, err := tailNode(
node, mapRequest.Version, m.state,
func(id types.NodeID) []netip.Prefix {
return policy.ReduceRoutes(node, m.state.GetNodePrimaryRoutes(id), matchers)
},
m.cfg)
tailnode, err := tailNode(node, mapRequest.Version, m.polMan, m.cfg)
if err != nil {
return nil, err
}
@@ -339,7 +336,7 @@ func (m *Mapper) marshalMapResponse(
}
if debugDumpMapResponsePath != "" {
data := map[string]any{
data := map[string]interface{}{
"Messages": messages,
"MapRequest": mapRequest,
"MapResponse": resp,
@@ -449,19 +446,13 @@ func (m *Mapper) baseWithConfigMapResponse(
) (*tailcfg.MapResponse, error) {
resp := m.baseMapResponse()
_, matchers := m.state.Filter()
tailnode, err := tailNode(
node, capVer, m.state,
func(id types.NodeID) []netip.Prefix {
return policy.ReduceRoutes(node, m.state.GetNodePrimaryRoutes(id), matchers)
},
m.cfg)
tailnode, err := tailNode(node, capVer, m.polMan, m.cfg)
if err != nil {
return nil, err
}
resp.Node = tailnode
resp.DERPMap = m.state.DERPMap()
resp.DERPMap = m.derpMap
resp.Domain = m.cfg.Domain()
@@ -478,11 +469,8 @@ func (m *Mapper) baseWithConfigMapResponse(
return &resp, nil
}
// ListPeers returns the peers of a node, regardless of policy or whether the node is expired.
// If no peer IDs are given, all peers are returned.
// If at least one peer ID is given, only these peer nodes will be returned.
func (m *Mapper) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
peers, err := m.state.ListPeers(nodeID, peerIDs...)
func (m *Mapper) ListPeers(nodeID types.NodeID) (types.Nodes, error) {
peers, err := m.db.ListPeers(nodeID)
if err != nil {
return nil, err
}
@@ -495,42 +483,31 @@ func (m *Mapper) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.
return peers, nil
}
// ListNodes queries the database for all nodes if no parameters are given,
// or only for the given nodes if at least one node ID is given as a parameter.
func (m *Mapper) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {
nodes, err := m.state.ListNodes(nodeIDs...)
if err != nil {
return nil, err
}
func nodeMapToList(nodes map[uint64]*types.Node) types.Nodes {
ret := make(types.Nodes, 0)
for _, node := range nodes {
online := m.notif.IsLikelyConnected(node.ID)
node.IsOnline = &online
ret = append(ret, node)
}
return nodes, nil
return ret
}
// routeFilterFunc is a function that takes a node ID and returns a list of
// netip.Prefixes that are allowed for that node. It is used to filter routes
// from the primary route manager to the node.
type routeFilterFunc func(id types.NodeID) []netip.Prefix
// appendPeerChanges mutates a tailcfg.MapResponse with all the
// necessary changes when peers have changed.
func appendPeerChanges(
resp *tailcfg.MapResponse,
fullChange bool,
state *state.State,
polMan policy.PolicyManager,
node *types.Node,
capVer tailcfg.CapabilityVersion,
changed types.Nodes,
cfg *types.Config,
) error {
filter, matchers := state.Filter()
filter := polMan.Filter()
sshPolicy, err := state.SSHPolicy(node)
sshPolicy, err := polMan.SSHPolicy(node)
if err != nil {
return err
}
@@ -538,19 +515,14 @@ func appendPeerChanges(
// If there are filter rules present, see if there are any nodes that cannot
// access each other at all and remove them from the peers.
if len(filter) > 0 {
changed = policy.ReduceNodes(node, changed, matchers)
changed = policy.FilterNodesByACL(node, changed, filter)
}
profiles := generateUserProfiles(node, changed)
dnsConfig := generateDNSConfig(cfg, node)
tailPeers, err := tailNodes(
changed, capVer, state,
func(id types.NodeID) []netip.Prefix {
return policy.ReduceRoutes(node, state.GetNodePrimaryRoutes(id), matchers)
},
cfg)
tailPeers, err := tailNodes(changed, capVer, polMan, cfg)
if err != nil {
return err
}
@@ -569,12 +541,26 @@ func appendPeerChanges(
resp.UserProfiles = profiles
resp.SSHPolicy = sshPolicy
// CapVer 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
// Currently, we do not send incremental packet filters, however using the
// new PacketFilters field and "base" allows us to send a full update when we
// have to send an empty list, avoiding the hack in the else block.
resp.PacketFilters = map[string][]tailcfg.FilterRule{
"base": policy.ReduceFilterRules(node, filter),
// 81: 2023-11-17: MapResponse.PacketFilters (incremental packet filter updates)
if capVer >= 81 {
// Currently, we do not send incremental packet filters, however using the
// new PacketFilters field and "base" allows us to send a full update when we
// have to send an empty list, avoiding the hack in the else block.
resp.PacketFilters = map[string][]tailcfg.FilterRule{
"base": policy.ReduceFilterRules(node, filter),
}
} else {
// This is a hack to avoid sending an empty list of packet filters.
// Since tailcfg.PacketFilter has omitempty, any empty PacketFilter will
// be omitted, causing the client to consider it unchanged, keeping the
// previous packet filter. Worst case, this can cause a node that previously
// has access to a node to _not_ lose access if an empty (allow none) is sent.
reduced := policy.ReduceFilterRules(node, filter)
if len(reduced) > 0 {
resp.PacketFilter = reduced
} else {
resp.PacketFilter = filter
}
}
return nil
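The branches above decide where the filter travels in the MapResponse. Condensed into one helper for readability; this is a sketch, the helper name is ours, the field names come from tailcfg.MapResponse.

package mapper

import "tailscale.com/tailcfg"

// setPacketFilter mirrors the capability-version split above: clients with
// capVer >= 81 get the named PacketFilters map (where "base" replaces the
// whole filter); older clients get the legacy PacketFilter field, and an
// empty reduced filter is never sent there because omitempty would make the
// client keep its previous, possibly more permissive, filter.
func setPacketFilter(resp *tailcfg.MapResponse, capVer tailcfg.CapabilityVersion, reduced, full []tailcfg.FilterRule) {
    if capVer >= 81 {
        resp.PacketFilters = map[string][]tailcfg.FilterRule{"base": reduced}
        return
    }
    if len(reduced) > 0 {
        resp.PacketFilter = reduced
    } else {
        resp.PacketFilter = full
    }
}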

View File

@@ -4,15 +4,19 @@ import (
"fmt"
"net/netip"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/policy/matcher"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"gopkg.in/check.v1"
"gorm.io/gorm"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/dnstype"
"tailscale.com/types/key"
)
var iap = func(ipStr string) *netip.Addr {
@@ -20,6 +24,51 @@ var iap = func(ipStr string) *netip.Addr {
return &ip
}
func (s *Suite) TestGetMapResponseUserProfiles(c *check.C) {
mach := func(hostname, username string, userid uint) *types.Node {
return &types.Node{
Hostname: hostname,
UserID: userid,
User: types.User{
Model: gorm.Model{
ID: userid,
},
Name: username,
},
}
}
nodeInShared1 := mach("test_get_shared_nodes_1", "user1", 1)
nodeInShared2 := mach("test_get_shared_nodes_2", "user2", 2)
nodeInShared3 := mach("test_get_shared_nodes_3", "user3", 3)
node2InShared1 := mach("test_get_shared_nodes_4", "user1", 1)
userProfiles := generateUserProfiles(
nodeInShared1,
types.Nodes{
nodeInShared2, nodeInShared3, node2InShared1,
},
)
c.Assert(len(userProfiles), check.Equals, 3)
users := []string{
"user1", "user2", "user3",
}
for _, user := range users {
found := false
for _, userProfile := range userProfiles {
if userProfile.DisplayName == user {
found = true
break
}
}
c.Assert(found, check.Equals, true)
}
}
func TestDNSConfigMapResponse(t *testing.T) {
tests := []struct {
magicDNS bool
@@ -80,91 +129,373 @@ func TestDNSConfigMapResponse(t *testing.T) {
}
}
// mockState is a mock implementation that provides the required methods
type mockState struct {
polMan policy.PolicyManager
derpMap *tailcfg.DERPMap
primary *routes.PrimaryRoutes
nodes types.Nodes
peers types.Nodes
}
func (m *mockState) DERPMap() *tailcfg.DERPMap {
return m.derpMap
}
func (m *mockState) Filter() ([]tailcfg.FilterRule, []matcher.Match) {
if m.polMan == nil {
return tailcfg.FilterAllowAll, nil
}
return m.polMan.Filter()
}
func (m *mockState) SSHPolicy(node *types.Node) (*tailcfg.SSHPolicy, error) {
if m.polMan == nil {
return nil, nil
}
return m.polMan.SSHPolicy(node)
}
func (m *mockState) NodeCanHaveTag(node *types.Node, tag string) bool {
if m.polMan == nil {
return false
}
return m.polMan.NodeCanHaveTag(node, tag)
}
func (m *mockState) GetNodePrimaryRoutes(nodeID types.NodeID) []netip.Prefix {
if m.primary == nil {
return nil
}
return m.primary.PrimaryRoutes(nodeID)
}
func (m *mockState) ListPeers(nodeID types.NodeID, peerIDs ...types.NodeID) (types.Nodes, error) {
if len(peerIDs) > 0 {
// Filter peers by the provided IDs
var filtered types.Nodes
for _, peer := range m.peers {
for _, id := range peerIDs {
if peer.ID == id {
filtered = append(filtered, peer)
break
}
}
}
return filtered, nil
}
// Return all peers except the node itself
var filtered types.Nodes
for _, peer := range m.peers {
if peer.ID != nodeID {
filtered = append(filtered, peer)
}
}
return filtered, nil
}
func (m *mockState) ListNodes(nodeIDs ...types.NodeID) (types.Nodes, error) {
if len(nodeIDs) > 0 {
// Filter nodes by the provided IDs
var filtered types.Nodes
for _, node := range m.nodes {
for _, id := range nodeIDs {
if node.ID == id {
filtered = append(filtered, node)
break
}
}
}
return filtered, nil
}
return m.nodes, nil
}
func Test_fullMapResponse(t *testing.T) {
t.Skip("Test needs to be refactored for new state-based architecture")
// TODO: Refactor this test to work with the new state-based mapper
// The test architecture needs to be updated to work with the state interface
// instead of the old direct dependency injection pattern
mustNK := func(str string) key.NodePublic {
var k key.NodePublic
_ = k.UnmarshalText([]byte(str))
return k
}
mustDK := func(str string) key.DiscoPublic {
var k key.DiscoPublic
_ = k.UnmarshalText([]byte(str))
return k
}
mustMK := func(str string) key.MachinePublic {
var k key.MachinePublic
_ = k.UnmarshalText([]byte(str))
return k
}
hiview := func(hoin tailcfg.Hostinfo) tailcfg.HostinfoView {
return hoin.View()
}
created := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
lastSeen := time.Date(2009, time.November, 10, 23, 9, 0, 0, time.UTC)
expire := time.Date(2500, time.November, 11, 23, 0, 0, 0, time.UTC)
user1 := types.User{Model: gorm.Model{ID: 0}, Name: "mini"}
user2 := types.User{Model: gorm.Model{ID: 1}, Name: "peer2"}
mini := &types.Node{
ID: 0,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPv4: iap("100.64.0.1"),
Hostname: "mini",
GivenName: "mini",
UserID: user1.ID,
User: user1,
ForcedTags: []string{},
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{
{
Prefix: tsaddr.AllIPv4(),
Advertised: true,
Enabled: true,
IsPrimary: false,
},
{
Prefix: netip.MustParsePrefix("192.168.0.0/24"),
Advertised: true,
Enabled: true,
IsPrimary: true,
},
{
Prefix: netip.MustParsePrefix("172.0.0.0/10"),
Advertised: true,
Enabled: false,
IsPrimary: true,
},
},
CreatedAt: created,
}
tailMini := &tailcfg.Node{
ID: 0,
StableID: "0",
Name: "mini",
User: 0,
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
netip.MustParsePrefix("100.64.0.1/32"),
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
},
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{netip.MustParsePrefix("192.168.0.0/24")},
LastSeen: &lastSeen,
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
},
}
peer1 := &types.Node{
ID: 1,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPv4: iap("100.64.0.2"),
Hostname: "peer1",
GivenName: "peer1",
UserID: user1.ID,
User: user1,
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{},
CreatedAt: created,
}
tailPeer1 := &tailcfg.Node{
ID: 1,
StableID: "1",
Name: "peer1",
Key: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
KeyExpiry: expire,
Machine: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
AllowedIPs: []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")},
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
LastSeen: &lastSeen,
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
},
}
peer2 := &types.Node{
ID: 2,
MachineKey: mustMK(
"mkey:f08305b4ee4250b95a70f3b7504d048d75d899993c624a26d422c67af0422507",
),
NodeKey: mustNK(
"nodekey:9b2ffa7e08cc421a3d2cca9012280f6a236fd0de0b4ce005b30a98ad930306fe",
),
DiscoKey: mustDK(
"discokey:cf7b0fd05da556fdc3bab365787b506fd82d64a70745db70e00e86c1b1c03084",
),
IPv4: iap("100.64.0.3"),
Hostname: "peer2",
GivenName: "peer2",
UserID: user2.ID,
User: user2,
ForcedTags: []string{},
LastSeen: &lastSeen,
Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{},
CreatedAt: created,
}
tests := []struct {
name string
pol *policy.ACLPolicy
node *types.Node
peers types.Nodes
derpMap *tailcfg.DERPMap
cfg *types.Config
want *tailcfg.MapResponse
wantErr bool
}{
// {
// name: "empty-node",
// node: types.Node{},
// pol: &policy.ACLPolicy{},
// dnsConfig: &tailcfg.DNSConfig{},
// baseDomain: "",
// want: nil,
// wantErr: true,
// },
{
name: "no-pol-no-peers-map-response",
pol: &policy.ACLPolicy{},
node: mini,
peers: types.Nodes{},
derpMap: &tailcfg.DERPMap{},
cfg: &types.Config{
BaseDomain: "",
TailcfgDNSConfig: &tailcfg.DNSConfig{},
LogTail: types.LogTailConfig{Enabled: false},
RandomizeClientPort: false,
},
want: &tailcfg.MapResponse{
Node: tailMini,
KeepAlive: false,
DERPMap: &tailcfg.DERPMap{},
Peers: []*tailcfg.Node{},
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
CollectServices: "false",
PacketFilter: []tailcfg.FilterRule{},
UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}},
ControlTime: &time.Time{},
Debug: &tailcfg.Debug{
DisableLogTail: true,
},
},
wantErr: false,
},
{
name: "no-pol-with-peer-map-response",
pol: &policy.ACLPolicy{},
node: mini,
peers: types.Nodes{
peer1,
},
derpMap: &tailcfg.DERPMap{},
cfg: &types.Config{
BaseDomain: "",
TailcfgDNSConfig: &tailcfg.DNSConfig{},
LogTail: types.LogTailConfig{Enabled: false},
RandomizeClientPort: false,
},
want: &tailcfg.MapResponse{
KeepAlive: false,
Node: tailMini,
DERPMap: &tailcfg.DERPMap{},
Peers: []*tailcfg.Node{
tailPeer1,
},
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
CollectServices: "false",
PacketFilter: []tailcfg.FilterRule{},
UserProfiles: []tailcfg.UserProfile{{LoginName: "mini", DisplayName: "mini"}},
SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}},
ControlTime: &time.Time{},
Debug: &tailcfg.Debug{
DisableLogTail: true,
},
},
wantErr: false,
},
{
name: "with-pol-map-response",
pol: &policy.ACLPolicy{
ACLs: []policy.ACL{
{
Action: "accept",
Sources: []string{"100.64.0.2"},
Destinations: []string{"mini:*"},
},
},
},
node: mini,
peers: types.Nodes{
peer1,
peer2,
},
derpMap: &tailcfg.DERPMap{},
cfg: &types.Config{
BaseDomain: "",
TailcfgDNSConfig: &tailcfg.DNSConfig{},
LogTail: types.LogTailConfig{Enabled: false},
RandomizeClientPort: false,
},
want: &tailcfg.MapResponse{
KeepAlive: false,
Node: tailMini,
DERPMap: &tailcfg.DERPMap{},
Peers: []*tailcfg.Node{
tailPeer1,
},
DNSConfig: &tailcfg.DNSConfig{},
Domain: "",
CollectServices: "false",
PacketFilter: []tailcfg.FilterRule{
{
SrcIPs: []string{"100.64.0.2/32"},
DstPorts: []tailcfg.NetPortRange{
{IP: "100.64.0.1/32", Ports: tailcfg.PortRangeAny},
},
},
},
UserProfiles: []tailcfg.UserProfile{
{LoginName: "mini", DisplayName: "mini"},
},
SSHPolicy: &tailcfg.SSHPolicy{Rules: []*tailcfg.SSHRule{}},
ControlTime: &time.Time{},
Debug: &tailcfg.Debug{
DisableLogTail: true,
},
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
polMan, _ := policy.NewPolicyManagerForTest(tt.pol, []types.User{user1, user2}, append(tt.peers, tt.node))
mappy := NewMapper(
nil,
tt.cfg,
tt.derpMap,
nil,
polMan,
)
got, err := mappy.fullMapResponse(
tt.node,
tt.peers,
0,
)
if (err != nil) != tt.wantErr {
t.Errorf("fullMapResponse() error = %v, wantErr %v", err, tt.wantErr)
return
}
spew.Dump(got)
if diff := cmp.Diff(
tt.want,
got,
cmpopts.EquateEmpty(),
// Ignore ControlTime, it is set to now and we don't really need to mock it.
cmpopts.IgnoreFields(tailcfg.MapResponse{}, "ControlTime"),
); diff != "" {
t.Errorf("fullMapResponse() unexpected result (-want +got):\n%s", diff)
}
})
}
}

View File

@@ -2,24 +2,19 @@ package mapper
import (
"fmt"
"net/netip"
"time"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/samber/lo"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
)
// NodeCanHaveTagChecker is an interface for checking if a node can have a tag
type NodeCanHaveTagChecker interface {
NodeCanHaveTag(node *types.Node, tag string) bool
}
func tailNodes(
nodes types.Nodes,
capVer tailcfg.CapabilityVersion,
checker NodeCanHaveTagChecker,
primaryRouteFunc routeFilterFunc,
polMan policy.PolicyManager,
cfg *types.Config,
) ([]*tailcfg.Node, error) {
tNodes := make([]*tailcfg.Node, len(nodes))
@@ -28,8 +23,7 @@ func tailNodes(
node, err := tailNode(
node,
capVer,
checker,
primaryRouteFunc,
polMan,
cfg,
)
if err != nil {
@@ -46,12 +40,28 @@ func tailNodes(
func tailNode(
node *types.Node,
capVer tailcfg.CapabilityVersion,
checker NodeCanHaveTagChecker,
primaryRouteFunc routeFilterFunc,
polMan policy.PolicyManager,
cfg *types.Config,
) (*tailcfg.Node, error) {
addrs := node.Prefixes()
allowedIPs := append(
[]netip.Prefix{},
addrs...) // we append the node's own IP, as it is required by the clients
primaryPrefixes := []netip.Prefix{}
for _, route := range node.Routes {
if route.Enabled {
if route.IsPrimary {
allowedIPs = append(allowedIPs, netip.Prefix(route.Prefix))
primaryPrefixes = append(primaryPrefixes, netip.Prefix(route.Prefix))
} else if route.IsExitRoute() {
allowedIPs = append(allowedIPs, netip.Prefix(route.Prefix))
}
}
}
var derp int
// TODO(kradalby): legacyDERP was removed in tailscale/tailscale@2fc4455e6dd9ab7f879d4e2f7cffc2be81f14077
@@ -76,19 +86,9 @@ func tailNode(
return nil, fmt.Errorf("tailNode, failed to create FQDN: %s", err)
}
var tags []string
for _, tag := range node.RequestTags() {
if checker.NodeCanHaveTag(node, tag) {
tags = append(tags, tag)
}
}
tags := polMan.Tags(node)
tags = lo.Uniq(append(tags, node.ForcedTags...))
routes := primaryRouteFunc(node.ID)
allowed := append(node.Prefixes(), routes...)
allowed = append(allowed, node.ExitRoutes()...)
tsaddr.SortPrefixes(allowed)
tNode := tailcfg.Node{
ID: tailcfg.NodeID(node.ID), // this is the actual ID
StableID: node.ID.StableID(),
@@ -103,8 +103,7 @@ func tailNode(
Machine: node.MachineKey,
DiscoKey: node.DiscoKey,
Addresses: addrs,
PrimaryRoutes: routes,
AllowedIPs: allowed,
AllowedIPs: allowedIPs,
Endpoints: node.Endpoints,
HomeDERP: derp,
LegacyDERPString: legacyDERP,
@@ -115,6 +114,8 @@ func tailNode(
Tags: tags,
PrimaryRoutes: primaryPrefixes,
MachineAuthorized: !node.IsExpired(),
Expired: node.IsExpired(),
}

View File

@@ -9,9 +9,7 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/routes"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/stretchr/testify/require"
"tailscale.com/net/tsaddr"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
@@ -50,7 +48,7 @@ func TestTailNode(t *testing.T) {
tests := []struct {
name string
node *types.Node
pol []byte
pol *policy.ACLPolicy
dnsConfig *tailcfg.DNSConfig
baseDomain string
want *tailcfg.Node
@@ -62,15 +60,19 @@ func TestTailNode(t *testing.T) {
GivenName: "empty",
Hostinfo: &tailcfg.Hostinfo{},
},
pol: &policy.ACLPolicy{},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "",
want: &tailcfg.Node{
Name: "empty",
StableID: "0",
Addresses: []netip.Prefix{},
AllowedIPs: []netip.Prefix{},
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Tags: []string{},
PrimaryRoutes: []netip.Prefix{},
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
@@ -105,16 +107,30 @@ func TestTailNode(t *testing.T) {
AuthKey: &types.PreAuthKey{},
LastSeen: &lastSeen,
Expiry: &expire,
Hostinfo: &tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.0.0.0/10"),
Hostinfo: &tailcfg.Hostinfo{},
Routes: []types.Route{
{
Prefix: tsaddr.AllIPv4(),
Advertised: true,
Enabled: true,
IsPrimary: false,
},
{
Prefix: netip.MustParsePrefix("192.168.0.0/24"),
Advertised: true,
Enabled: true,
IsPrimary: true,
},
{
Prefix: netip.MustParsePrefix("172.0.0.0/10"),
Advertised: true,
Enabled: false,
IsPrimary: true,
},
},
ApprovedRoutes: []netip.Prefix{tsaddr.AllIPv4(), netip.MustParsePrefix("192.168.0.0/24")},
CreatedAt: created,
CreatedAt: created,
},
pol: &policy.ACLPolicy{},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "",
want: &tailcfg.Node{
@@ -137,54 +153,22 @@ func TestTailNode(t *testing.T) {
),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
AllowedIPs: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("100.64.0.1/32"),
tsaddr.AllIPv6(),
},
PrimaryRoutes: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
},
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{
tsaddr.AllIPv4(),
netip.MustParsePrefix("192.168.0.0/24"),
netip.MustParsePrefix("172.0.0.0/10"),
},
}),
Created: created,
Hostinfo: hiview(tailcfg.Hostinfo{}),
Created: created,
Tags: []string{},
LastSeen: &lastSeen,
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
tailcfg.CapabilityFileSharing: []tailcfg.RawMessage{},
tailcfg.CapabilityAdmin: []tailcfg.RawMessage{},
tailcfg.CapabilitySSH: []tailcfg.RawMessage{},
PrimaryRoutes: []netip.Prefix{
netip.MustParsePrefix("192.168.0.0/24"),
},
},
wantErr: false,
},
{
name: "check-dot-suffix-on-node-name",
node: &types.Node{
GivenName: "minimal",
Hostinfo: &tailcfg.Hostinfo{},
},
dnsConfig: &tailcfg.DNSConfig{},
baseDomain: "example.com",
want: &tailcfg.Node{
// a node name should have a dot appended
Name: "minimal.example.com.",
StableID: "0",
HomeDERP: 0,
LegacyDERPString: "127.3.3.40:0",
Hostinfo: hiview(tailcfg.Hostinfo{}),
Tags: []string{},
LastSeen: &lastSeen,
MachineAuthorized: true,
CapMap: tailcfg.NodeCapMap{
@@ -202,26 +186,16 @@ func TestTailNode(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
polMan, err := policy.NewPolicyManager(tt.pol, []types.User{}, types.Nodes{tt.node})
require.NoError(t, err)
primary := routes.New()
polMan, _ := policy.NewPolicyManagerForTest(tt.pol, []types.User{}, types.Nodes{tt.node})
cfg := &types.Config{
BaseDomain: tt.baseDomain,
TailcfgDNSConfig: tt.dnsConfig,
RandomizeClientPort: false,
}
_ = primary.SetRoutes(tt.node.ID, tt.node.SubnetRoutes()...)
// This is a hack to avoid having a second node to test the primary route.
// This should be baked into the test case proper if it is extended in the future.
_ = primary.SetRoutes(2, netip.MustParsePrefix("192.168.0.0/24"))
got, err := tailNode(
tt.node,
0,
polMan,
func(id types.NodeID) []netip.Prefix {
return primary.PrimaryRoutes(id)
},
cfg,
)
@@ -268,20 +242,13 @@ func TestNodeExpiry(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
node := &types.Node{
ID: 0,
GivenName: "test",
Expiry: tt.exp,
}
polMan, err := policy.NewPolicyManager(nil, nil, nil)
require.NoError(t, err)
tn, err := tailNode(
node,
0,
polMan,
func(id types.NodeID) []netip.Prefix {
return []netip.Prefix{}
},
&policy.PolicyManagerV1{},
&types.Config{},
)
if err != nil {


@@ -100,10 +100,6 @@ func (h *Headscale) NoiseUpgradeHandler(
router.HandleFunc("/machine/register", noiseServer.NoiseRegistrationHandler).
Methods(http.MethodPost)
// Endpoints outside of the register endpoint must use getAndValidateNode to
// get the node to ensure that the MachineKey matches the Node setting up the
// connection.
router.HandleFunc("/machine/map", noiseServer.NoisePollNetMapHandler)
noiseServer.httpBaseConfig = &http.Server{
@@ -120,13 +116,9 @@ func (h *Headscale) NoiseUpgradeHandler(
)
}
func unsupportedClientError(version tailcfg.CapabilityVersion) error {
return fmt.Errorf("unsupported client version: %s (%d)", capver.TailscaleVersion(version), version)
}
func (ns *noiseServer) earlyNoise(protocolVersion int, writer io.Writer) error {
if !isSupportedVersion(tailcfg.CapabilityVersion(protocolVersion)) {
return unsupportedClientError(tailcfg.CapabilityVersion(protocolVersion))
return fmt.Errorf("unsupported client version: %d", protocolVersion)
}
earlyJSON, err := json.Marshal(&tailcfg.EarlyNoise{
@@ -179,7 +171,7 @@ func rejectUnsupported(
Str("node_key", nkey.ShortString()).
Str("machine_key", mkey.ShortString()).
Msg("unsupported client connected")
http.Error(writer, unsupportedClientError(version).Error(), http.StatusBadRequest)
http.Error(writer, "unsupported client version", http.StatusBadRequest)
return true
}
@@ -213,14 +205,18 @@ func (ns *noiseServer) NoisePollNetMapHandler(
return
}
node, err := ns.getAndValidateNode(mapRequest)
ns.nodeKey = mapRequest.NodeKey
node, err := ns.headscale.db.GetNodeByNodeKey(mapRequest.NodeKey)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
httpError(writer, NewHTTPError(http.StatusNotFound, "node not found", nil))
return
}
httpError(writer, err)
return
}
ns.nodeKey = node.NodeKey
sess := ns.headscale.newMapSession(req.Context(), mapRequest, writer, node)
sess.tracef("a node sending a MapRequest with Noise protocol")
if !sess.isStreaming() {
@@ -230,10 +226,6 @@ func (ns *noiseServer) NoisePollNetMapHandler(
}
}
func regErr(err error) *tailcfg.RegisterResponse {
return &tailcfg.RegisterResponse{Error: err.Error()}
}
// NoiseRegistrationHandler handles the actual registration process of a node.
func (ns *noiseServer) NoiseRegistrationHandler(
writer http.ResponseWriter,
@@ -245,66 +237,52 @@ func (ns *noiseServer) NoiseRegistrationHandler(
return
}
registerRequest, registerResponse := func() (*tailcfg.RegisterRequest, *tailcfg.RegisterResponse) {
var resp *tailcfg.RegisterResponse
registerRequest, registerResponse, err := func() (*tailcfg.RegisterRequest, []byte, error) {
body, err := io.ReadAll(req.Body)
if err != nil {
return &tailcfg.RegisterRequest{}, regErr(err)
return nil, nil, err
}
var regReq tailcfg.RegisterRequest
if err := json.Unmarshal(body, &regReq); err != nil {
return &regReq, regErr(err)
var registerRequest tailcfg.RegisterRequest
if err := json.Unmarshal(body, &registerRequest); err != nil {
return nil, nil, err
}
ns.nodeKey = regReq.NodeKey
ns.nodeKey = registerRequest.NodeKey
resp, err = ns.headscale.handleRegister(req.Context(), regReq, ns.conn.Peer())
resp, err := ns.headscale.handleRegister(req.Context(), registerRequest, ns.conn.Peer())
// TODO(kradalby): Here we could have two error types, one that is surfaced to the client
// and one that returns 500.
if err != nil {
var httpErr HTTPError
if errors.As(err, &httpErr) {
resp = &tailcfg.RegisterResponse{
Error: httpErr.Msg,
}
return &regReq, resp
}
return &regReq, regErr(err)
return nil, nil, err
}
return &regReq, resp
respBody, err := json.Marshal(resp)
if err != nil {
return nil, nil, err
}
return &registerRequest, respBody, nil
}()
if err != nil {
log.Error().
Caller().
Err(err).
Msg("Error handling registration")
http.Error(writer, "Internal server error", http.StatusInternalServerError)
}
// Reject unsupported versions
if rejectUnsupported(writer, registerRequest.Version, ns.machineKey, registerRequest.NodeKey) {
return
}
respBody, err := json.Marshal(registerResponse)
if err != nil {
httpError(writer, err)
return
}
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
writer.WriteHeader(http.StatusOK)
writer.Write(respBody)
}
// getAndValidateNode retrieves the node from the database using the NodeKey
// and validates that it matches the MachineKey from the Noise session.
func (ns *noiseServer) getAndValidateNode(mapRequest tailcfg.MapRequest) (*types.Node, error) {
node, err := ns.headscale.state.GetNodeByNodeKey(mapRequest.NodeKey)
_, err = writer.Write(registerResponse)
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, NewHTTPError(http.StatusNotFound, "node not found", nil)
}
return nil, err
log.Error().
Caller().
Err(err).
Msg("Failed to write response")
}
// Validate that the MachineKey in the Noise session matches the one associated with the NodeKey.
if ns.machineKey != node.MachineKey {
return nil, NewHTTPError(http.StatusNotFound, "node key in request does not match the one associated with this machine key", nil)
}
return node, nil
}
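// A minimal sketch of the lookup-then-validate pattern that getAndValidateNode
// implements above: resolve the node by the NodeKey carried in the MapRequest,
// then require that the MachineKey bound to the Noise session matches the key
// stored for that node. The node struct and the lookup callback below are
// hypothetical stand-ins, not headscale's actual API.
package sketch

import "errors"

type sketchNode struct {
	NodeKey    string
	MachineKey string
}

func getAndValidate(lookup func(nodeKey string) (*sketchNode, error), sessionMachineKey, nodeKey string) (*sketchNode, error) {
	n, err := lookup(nodeKey)
	if err != nil {
		return nil, errors.New("node not found")
	}
	// Reject the request when the Noise session was established with a
	// different machine key than the one recorded for this node.
	if n.MachineKey != sessionMachineKey {
		return nil, errors.New("node key in request does not match the machine key of this session")
	}
	return n, nil
}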


@@ -9,7 +9,7 @@ import (
"time"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/puzpuzpuz/xsync/v4"
"github.com/puzpuzpuz/xsync/v3"
"github.com/rs/zerolog/log"
"github.com/sasha-s/go-deadlock"
"tailscale.com/envknob"
@@ -63,26 +63,9 @@ func (n *Notifier) Close() {
n.closed = true
n.b.close()
// Close channels safely using the helper method
for nodeID, c := range n.nodes {
n.safeCloseChannel(nodeID, c)
for _, c := range n.nodes {
close(c)
}
// Clear node map after closing channels
n.nodes = make(map[types.NodeID]chan<- types.StateUpdate)
}
// safeCloseChannel closes a channel and panic recovers if already closed
func (n *Notifier) safeCloseChannel(nodeID types.NodeID, c chan<- types.StateUpdate) {
defer func() {
if r := recover(); r != nil {
log.Error().
Uint64("node.id", nodeID.Uint64()).
Any("recover", r).
Msg("recovered from panic when closing channel in Close()")
}
}()
close(c)
}
func (n *Notifier) tracef(nID types.NodeID, msg string, args ...any) {
@@ -107,11 +90,7 @@ func (n *Notifier) AddNode(nodeID types.NodeID, c chan<- types.StateUpdate) {
// connection. Close the old channel and replace it.
if curr, ok := n.nodes[nodeID]; ok {
n.tracef(nodeID, "channel present, closing and replacing")
// Use the safeCloseChannel helper in a goroutine to avoid deadlocks
// if/when someone is waiting to send on this channel
go func(ch chan<- types.StateUpdate) {
n.safeCloseChannel(nodeID, ch)
}(curr)
close(curr)
}
n.nodes[nodeID] = c
@@ -182,7 +161,6 @@ func (n *Notifier) IsLikelyConnected(nodeID types.NodeID) bool {
return false
}
// LikelyConnectedMap returns a thread safe map of connected nodes
func (n *Notifier) LikelyConnectedMap() *xsync.MapOf[types.NodeID, bool] {
return n.connected
}
@@ -410,13 +388,19 @@ func (b *batcher) flush() {
})
if b.changedNodeIDs.Slice().Len() > 0 {
update := types.UpdatePeerChanged(changedNodes...)
update := types.StateUpdate{
Type: types.StatePeerChanged,
ChangeNodes: changedNodes,
}
b.n.sendAll(update)
}
if len(patches) > 0 {
patchUpdate := types.UpdatePeerPatch(patches...)
patchUpdate := types.StateUpdate{
Type: types.StatePeerChangedPatch,
ChangePatches: patches,
}
b.n.sendAll(patchUpdate)
}
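// safeCloseChannel, shown earlier in this diff, guards against Go's double-close
// panic: closing a channel that is already closed panics at runtime. A generic
// sketch of that guard (not the notifier's exact code) could look like this:
package sketch

func closeQuietly[T any](ch chan<- T) {
	defer func() {
		// recover turns a "close of closed channel" panic into a no-op.
		_ = recover()
	}()
	close(ch)
}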


@@ -2,16 +2,11 @@ package notifier
import (
"context"
"fmt"
"math/rand"
"net/netip"
"sort"
"sync"
"testing"
"time"
"slices"
"github.com/google/go-cmp/cmp"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
@@ -254,7 +249,9 @@ func TestBatcher(t *testing.T) {
// Make the inner order stable for comparison.
for _, u := range got {
slices.Sort(u.ChangeNodes)
sort.Slice(u.ChangeNodes, func(i, j int) bool {
return u.ChangeNodes[i] < u.ChangeNodes[j]
})
sort.Slice(u.ChangePatches, func(i, j int) bool {
return u.ChangePatches[i].NodeID < u.ChangePatches[j].NodeID
})
@@ -266,78 +263,3 @@ func TestBatcher(t *testing.T) {
})
}
}
// TestIsLikelyConnectedRaceCondition tests for a race condition in IsLikelyConnected.
// Multiple goroutines calling AddNode and RemoveNode can panic when trying to
// close a channel that was already closed, which can happen when a node changes
// network transport quickly (e.g. mobile->wifi) and reconnects whilst also disconnecting.
func TestIsLikelyConnectedRaceCondition(t *testing.T) {
// mock config for the notifier
cfg := &types.Config{
Tuning: types.Tuning{
NotifierSendTimeout: 1 * time.Second,
BatchChangeDelay: 1 * time.Second,
NodeMapSessionBufferedChanSize: 30,
},
}
notifier := NewNotifier(cfg)
defer notifier.Close()
nodeID := types.NodeID(1)
updateChan := make(chan types.StateUpdate, 10)
var wg sync.WaitGroup
// Number of goroutines to spawn for concurrent access
concurrentAccessors := 100
iterations := 100
// Add node to notifier
notifier.AddNode(nodeID, updateChan)
// Track errors
errChan := make(chan string, concurrentAccessors*iterations)
// Start goroutines to cause a race
wg.Add(concurrentAccessors)
for i := range concurrentAccessors {
go func(routineID int) {
defer wg.Done()
for range iterations {
// Simulate race by having some goroutines check IsLikelyConnected
// while others add/remove the node
if routineID%3 == 0 {
// This goroutine checks connection status
isConnected := notifier.IsLikelyConnected(nodeID)
if isConnected != true && isConnected != false {
errChan <- fmt.Sprintf("Invalid connection status: %v", isConnected)
}
} else if routineID%3 == 1 {
// This goroutine removes the node
notifier.RemoveNode(nodeID, updateChan)
} else {
// This goroutine adds the node back
notifier.AddNode(nodeID, updateChan)
}
// Small random delay to increase chance of races
time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
}
}(i)
}
wg.Wait()
close(errChan)
// Collate errors
var errors []string
for err := range errChan {
errors = append(errors, err)
}
if len(errors) > 0 {
t.Errorf("Detected %d race condition errors: %v", len(errors), errors)
}
}
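// The failure mode this test exercises is Go's double-close panic: closing a
// channel twice panics with "close of closed channel". A minimal, runnable
// demonstration:
package main

import "fmt"

func main() {
	defer func() { fmt.Println("recovered:", recover()) }()
	ch := make(chan struct{})
	close(ch)
	close(ch) // panics; the deferred recover above catches it
}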


@@ -2,7 +2,6 @@ package hscontrol
import (
"bytes"
"cmp"
"context"
_ "embed"
"errors"
@@ -17,7 +16,7 @@ import (
"github.com/gorilla/mux"
"github.com/juanfont/headscale/hscontrol/db"
"github.com/juanfont/headscale/hscontrol/notifier"
"github.com/juanfont/headscale/hscontrol/state"
"github.com/juanfont/headscale/hscontrol/policy"
"github.com/juanfont/headscale/hscontrol/types"
"github.com/juanfont/headscale/hscontrol/util"
"github.com/rs/zerolog/log"
@@ -28,8 +27,6 @@ import (
const (
randomByteSize = 16
defaultOAuthOptionsCount = 3
registerCacheExpiration = time.Minute * 15
registerCacheCleanup = time.Minute * 20
)
var (
@@ -58,9 +55,11 @@ type RegistrationInfo struct {
type AuthProviderOIDC struct {
serverURL string
cfg *types.OIDCConfig
state *state.State
db *db.HSDatabase
registrationCache *zcache.Cache[string, RegistrationInfo]
notifier *notifier.Notifier
ipAlloc *db.IPAllocator
polMan policy.PolicyManager
oidcProvider *oidc.Provider
oauth2Config *oauth2.Config
@@ -70,8 +69,10 @@ func NewAuthProviderOIDC(
ctx context.Context,
serverURL string,
cfg *types.OIDCConfig,
state *state.State,
db *db.HSDatabase,
notif *notifier.Notifier,
ipAlloc *db.IPAllocator,
polMan policy.PolicyManager,
) (*AuthProviderOIDC, error) {
var err error
// grab oidc config if it hasn't been already
@@ -99,9 +100,11 @@ func NewAuthProviderOIDC(
return &AuthProviderOIDC{
serverURL: serverURL,
cfg: cfg,
state: state,
db: db,
registrationCache: registrationCache,
notifier: notif,
ipAlloc: ipAlloc,
polMan: polMan,
oidcProvider: oidcProvider,
oauth2Config: oauth2Config,
@@ -231,14 +234,7 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
return
}
oauth2Token, err := a.getOauth2Token(req.Context(), code, state)
if err != nil {
httpError(writer, err)
return
}
idToken, err := a.extractIDToken(req.Context(), oauth2Token)
idToken, err := a.extractIDToken(req.Context(), code, state)
if err != nil {
httpError(writer, err)
return
@@ -277,55 +273,12 @@ func (a *AuthProviderOIDC) OIDCCallbackHandler(
return
}
var userinfo *oidc.UserInfo
userinfo, err = a.oidcProvider.UserInfo(req.Context(), oauth2.StaticTokenSource(oauth2Token))
user, err := a.createOrUpdateUserFromClaim(&claims)
if err != nil {
util.LogErr(err, "could not get userinfo; only checking claim")
}
// If the userinfo is available, we can check if the subject matches the
// claims, then use some of the userinfo fields to update the user.
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
if userinfo != nil && userinfo.Subject == claims.Sub {
claims.Email = cmp.Or(claims.Email, userinfo.Email)
claims.EmailVerified = cmp.Or(claims.EmailVerified, types.FlexibleBoolean(userinfo.EmailVerified))
// The userinfo has some extra fields that we can use to update the user but they are only
// available in the underlying claims struct.
// TODO(kradalby): there might be more interesting fields here that we have not found yet.
var userinfo2 types.OIDCUserInfo
if err := userinfo.Claims(&userinfo2); err == nil {
claims.Username = cmp.Or(claims.Username, userinfo2.PreferredUsername)
claims.Name = cmp.Or(claims.Name, userinfo2.Name)
claims.ProfilePictureURL = cmp.Or(claims.ProfilePictureURL, userinfo2.Picture)
}
}
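// cmp.Or, used above to merge userinfo fields into the claims, returns the
// first of its arguments that is not the zero value (standard library, Go 1.22+).
// A minimal illustration with plain strings:
package main

import (
	"cmp"
	"fmt"
)

func main() {
	fmt.Println(cmp.Or("", "from-userinfo"))           // "from-userinfo": an empty claim falls back
	fmt.Println(cmp.Or("from-claim", "from-userinfo")) // "from-claim": an already-set claim wins
}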
user, policyChanged, err := a.createOrUpdateUserFromClaim(&claims)
if err != nil {
log.Error().
Err(err).
Caller().
Msgf("could not create or update user")
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusInternalServerError)
_, werr := writer.Write([]byte("Could not create or update user"))
if werr != nil {
log.Error().
Caller().
Err(werr).
Msg("Failed to write response")
}
httpError(writer, err)
return
}
// Send policy update notifications if needed
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "oidc-user-created", user.Name)
a.notifier.NotifyAll(ctx, types.UpdateFull())
}
// TODO(kradalby): Is this comment right?
// If the node exists, then the node should be reauthenticated,
// if the node does not exist, and the machine key exists, then
@@ -380,12 +333,13 @@ func extractCodeAndStateParamFromRequest(
return code, state, nil
}
// getOauth2Token exchanges the code from the callback for an oauth2 token.
func (a *AuthProviderOIDC) getOauth2Token(
// extractIDToken takes the code parameter from the callback
// and extracts the ID token from the oauth2 token.
func (a *AuthProviderOIDC) extractIDToken(
ctx context.Context,
code string,
state string,
) (*oauth2.Token, error) {
) (*oidc.IDToken, error) {
var exchangeOpts []oauth2.AuthCodeOption
if a.cfg.PKCE.Enabled {
@@ -402,14 +356,7 @@ func (a *AuthProviderOIDC) getOauth2Token(
if err != nil {
return nil, NewHTTPError(http.StatusForbidden, "invalid code", fmt.Errorf("could not exchange code for token: %w", err))
}
return oauth2Token, err
}
// extractIDToken extracts the ID token from the oauth2 token.
func (a *AuthProviderOIDC) extractIDToken(
ctx context.Context,
oauth2Token *oauth2.Token,
) (*oidc.IDToken, error) {
rawIDToken, ok := oauth2Token.Extra("id_token").(string)
if !ok {
return nil, NewHTTPError(http.StatusBadRequest, "no id_token", errNoOIDCIDToken)
@@ -487,40 +434,57 @@ func (a *AuthProviderOIDC) getRegistrationIDFromState(state string) *types.Regis
func (a *AuthProviderOIDC) createOrUpdateUserFromClaim(
claims *types.OIDCClaims,
) (*types.User, bool, error) {
) (*types.User, error) {
var user *types.User
var err error
var newUser bool
var policyChanged bool
user, err = a.state.GetUserByOIDCIdentifier(claims.Identifier())
user, err = a.db.GetUserByOIDCIdentifier(claims.Identifier())
if err != nil && !errors.Is(err, db.ErrUserNotFound) {
return nil, false, fmt.Errorf("creating or updating user: %w", err)
return nil, fmt.Errorf("creating or updating user: %w", err)
}
// This check is for legacy users: if the user cannot be found by the OIDC identifier,
// look it up by username. This should only be needed once.
// This branch will persist for a number of versions after the OIDC migration and
// then be removed following a deprecation.
// TODO(kradalby): Remove when strip_email_domain and the migration are removed
// after #2170 is cleaned up.
if a.cfg.MapLegacyUsers && user == nil {
log.Trace().Str("username", claims.Username).Str("sub", claims.Sub).Msg("user not found by OIDC identifier, looking up by username")
if oldUsername, err := getUserName(claims, a.cfg.StripEmaildomain); err == nil {
log.Trace().Str("old_username", oldUsername).Str("sub", claims.Sub).Msg("found username")
user, err = a.db.GetUserByName(oldUsername)
if err != nil && !errors.Is(err, db.ErrUserNotFound) {
return nil, fmt.Errorf("getting user: %w", err)
}
// If the user exists but already has a provider identifier (OIDC sub), create a new user.
// This prevents users that have already been migrated to the new OIDC format
// from being implicitly updated with a new OIDC identifier, which could lead to an
// account takeover.
if user != nil && user.ProviderIdentifier.Valid {
log.Info().Str("username", claims.Username).Str("sub", claims.Sub).Msg("user found by username, but has provider identifier, creating new user.")
user = &types.User{}
}
}
}
// if the user is still not found, create a new empty user.
if user == nil {
newUser = true
user = &types.User{}
}
user.FromClaim(claims)
if newUser {
user, policyChanged, err = a.state.CreateUser(*user)
if err != nil {
return nil, false, fmt.Errorf("creating user: %w", err)
}
} else {
_, policyChanged, err = a.state.UpdateUser(types.UserID(user.ID), func(u *types.User) error {
*u = *user
return nil
})
if err != nil {
return nil, false, fmt.Errorf("updating user: %w", err)
}
err = a.db.DB.Save(user).Error
if err != nil {
return nil, fmt.Errorf("creating or updating user: %w", err)
}
return user, policyChanged, nil
err = usersChangedHook(a.db, a.polMan, a.notifier)
if err != nil {
return nil, fmt.Errorf("updating resources using user: %w", err)
}
return user, nil
}
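// A compact sketch of the lookup order described in the comments of
// createOrUpdateUserFromClaim above: prefer the OIDC identifier, optionally fall
// back to the legacy username, and never adopt a user that already carries a
// provider identifier. The helper signatures and the user struct are
// hypothetical, not the real database API.
package sketch

type legacyUser struct {
	HasProviderID bool
}

func resolveUser(byOIDC, byName func(string) *legacyUser, oidcID, username string, mapLegacyUsers bool) *legacyUser {
	if u := byOIDC(oidcID); u != nil {
		return u
	}
	if mapLegacyUsers {
		if u := byName(username); u != nil && !u.HasProviderID {
			return u
		}
	}
	// Not found: the caller creates a new user from the claims.
	return nil
}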
func (a *AuthProviderOIDC) handleRegistration(
@@ -528,49 +492,40 @@ func (a *AuthProviderOIDC) handleRegistration(
registrationID types.RegistrationID,
expiry time.Time,
) (bool, error) {
node, newNode, err := a.state.HandleNodeFromAuthPath(
ipv4, ipv6, err := a.ipAlloc.Next()
if err != nil {
return false, err
}
node, newNode, err := a.db.HandleNodeFromAuthPath(
registrationID,
types.UserID(user.ID),
&expiry,
util.RegisterMethodOIDC,
ipv4, ipv6,
)
if err != nil {
return false, fmt.Errorf("could not register node: %w", err)
}
// This is a bit of a back and forth, but we have a bit of a chicken and egg
// dependency here.
// Because of the way the policy manager works, we need to have the node
// in the database, then add it to the policy manager and then we can
// approve the route. This means we get this dance where the node is
// first added to the database, then we add it to the policy manager via
// SaveNode (which automatically updates the policy manager) and then we can auto approve the routes.
// As that only approves the struct object, we need to save it again and
// ensure we send an update.
// This works, but might be another good candidate for doing some sort of
// eventbus.
routesChanged := a.state.AutoApproveRoutes(node)
_, policyChanged, err := a.state.SaveNode(node)
// Send an update to all nodes if this is a new node that they need to know
// about.
// If this is a refresh, just send new expiry updates.
updateSent, err := nodesChangedHook(a.db, a.polMan, a.notifier)
if err != nil {
return false, fmt.Errorf("saving auto approved routes to node: %w", err)
return false, fmt.Errorf("updating resources using node: %w", err)
}
// Send policy update notifications if needed (from SaveNode or route changes)
if policyChanged {
ctx := types.NotifyCtx(context.Background(), "oidc-nodes-change", "all")
a.notifier.NotifyAll(ctx, types.UpdateFull())
}
if routesChanged {
if !updateSent {
ctx := types.NotifyCtx(context.Background(), "oidc-expiry-self", node.Hostname)
a.notifier.NotifyByNodeID(
ctx,
types.UpdateSelf(node.ID),
types.StateSelf(node.ID),
node.ID,
)
ctx = types.NotifyCtx(context.Background(), "oidc-expiry-peers", node.Hostname)
a.notifier.NotifyWithIgnore(ctx, types.UpdatePeerChanged(node.ID), node.ID)
a.notifier.NotifyWithIgnore(ctx, types.StateUpdatePeerAdded(node.ID), node.ID)
}
return newNode, nil
@@ -584,7 +539,7 @@ func renderOIDCCallbackTemplate(
) (*bytes.Buffer, error) {
var content bytes.Buffer
if err := oidcCallbackTemplate.Execute(&content, oidcCallbackTemplateConfig{
User: user.Display(),
User: user.DisplayNameOrUsername(),
Verb: verb,
}); err != nil {
return nil, fmt.Errorf("rendering OIDC callback template: %w", err)
@@ -593,6 +548,27 @@ func renderOIDCCallbackTemplate(
return &content, nil
}
// TODO(kradalby): Reintroduce when strip_email_domain is removed
// after #2170 is cleaned up
// DEPRECATED: DO NOT USE.
func getUserName(
claims *types.OIDCClaims,
stripEmaildomain bool,
) (string, error) {
if !claims.EmailVerified {
return "", fmt.Errorf("email not verified")
}
userName, err := util.NormalizeToFQDNRules(
claims.Email,
stripEmaildomain,
)
if err != nil {
return "", err
}
return userName, nil
}
func setCSRFCookie(w http.ResponseWriter, r *http.Request, name string) (string, error) {
val, err := util.GenerateRandomStringURLSafe(64)
if err != nil {
