Mirror of https://github.com/juanfont/headscale.git
Compare commits: v0.18.0-be...v0.18.0-be (28 commits)
Commits (SHA1):

- 2084464225
- 66ebbf3ecb
- 55a3885614
- afae1ff7b6
- 4de49f5f49
- 6db9656008
- fecb13b24b
- 23a595c26f
- 085912cfb4
- 7157e14aff
- 4e2c4f92d3
- 893b0de8fa
- 9b98c3b79f
- 6de26b1d7c
- 1f1931fb00
- 1f4efbcd3b
- 711fe1d806
- e2c62a7b0c
- ab6565723e
- 7bb6f1a7eb
- 549b82df11
- 036cdf922f
- b4ff22935c
- 5feadbf3fc
- 3e9ee816f9
- 2494e27a73
- 8e8b65bb84
- b7d7fc57c4
.github/FUNDING.yml (vendored, new file, 3 lines)

@@ -0,0 +1,3 @@
# These are supported funding model platforms

ko_fi: headscale
@@ -9,6 +9,9 @@
- Set `db_ssl` to false by default [#1052](https://github.com/juanfont/headscale/pull/1052)
- Fix duplicate nodes due to incorrect implementation of the protocol [#1058](https://github.com/juanfont/headscale/pull/1058)
- Report if a machine is online in CLI more accurately [#1062](https://github.com/juanfont/headscale/pull/1062)
- Added config option for custom DNS records [#1035](https://github.com/juanfont/headscale/pull/1035)
- Expire nodes based on OIDC token expiry [#1067](https://github.com/juanfont/headscale/pull/1067)
- Remove ephemeral nodes on logout [#1098](https://github.com/juanfont/headscale/pull/1098)

## 0.17.1 (2022-12-05)
app.go (60 lines changed)

@@ -217,6 +217,15 @@ func (h *Headscale) expireEphemeralNodes(milliSeconds int64) {
}
}

// expireExpiredMachines expires machines that have an explicit expiry set
// after that expiry time has passed.
func (h *Headscale) expireExpiredMachines(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {
h.expireExpiredMachinesWorker()
}
}

func (h *Headscale) failoverSubnetRoutes(milliSeconds int64) {
ticker := time.NewTicker(time.Duration(milliSeconds) * time.Millisecond)
for range ticker.C {

@@ -248,8 +257,7 @@ func (h *Headscale) expireEphemeralNodesWorker() {

expiredFound := false
for _, machine := range machines {
if machine.AuthKey != nil && machine.LastSeen != nil &&
machine.AuthKey.Ephemeral &&
if machine.isEphemeral() && machine.LastSeen != nil &&
time.Now().
After(machine.LastSeen.Add(h.cfg.EphemeralNodeInactivityTimeout)) {
expiredFound = true

@@ -273,6 +281,53 @@ func (h *Headscale) expireEphemeralNodesWorker() {
}
}

func (h *Headscale) expireExpiredMachinesWorker() {
namespaces, err := h.ListNamespaces()
if err != nil {
log.Error().Err(err).Msg("Error listing namespaces")

return
}

for _, namespace := range namespaces {
machines, err := h.ListMachinesInNamespace(namespace.Name)
if err != nil {
log.Error().
Err(err).
Str("namespace", namespace.Name).
Msg("Error listing machines in namespace")

return
}

expiredFound := false
for index, machine := range machines {
if machine.isExpired() &&
machine.Expiry.After(h.getLastStateChange(namespace)) {
expiredFound = true

err := h.ExpireMachine(&machines[index])
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Str("name", machine.GivenName).
Msg("🤮 Cannot expire machine")
} else {
log.Info().
Str("machine", machine.Hostname).
Str("name", machine.GivenName).
Msg("Machine successfully expired")
}
}
}

if expiredFound {
h.setLastStateChangeToNow()
}
}
}

func (h *Headscale) grpcAuthenticationInterceptor(ctx context.Context,
req interface{},
info *grpc.UnaryServerInfo,

@@ -494,6 +549,7 @@ func (h *Headscale) Serve() error {
}

go h.expireEphemeralNodes(updateInterval)
go h.expireExpiredMachines(updateInterval)

go h.failoverSubnetRoutes(updateInterval)
@@ -16,10 +16,11 @@ const (
errMockOidcClientIDNotDefined = Error("MOCKOIDC_CLIENT_ID not defined")
errMockOidcClientSecretNotDefined = Error("MOCKOIDC_CLIENT_SECRET not defined")
errMockOidcPortNotDefined = Error("MOCKOIDC_PORT not defined")
accessTTL = 10 * time.Minute
refreshTTL = 60 * time.Minute
)

var accessTTL = 2 * time.Minute

func init() {
rootCmd.AddCommand(mockOidcCmd)
}

@@ -54,6 +55,16 @@ func mockOIDC() error {
if portStr == "" {
return errMockOidcPortNotDefined
}
accessTTLOverride := os.Getenv("MOCKOIDC_ACCESS_TTL")
if accessTTLOverride != "" {
newTTL, err := time.ParseDuration(accessTTLOverride)
if err != nil {
return err
}
accessTTL = newTTL
}

log.Info().Msgf("Access token TTL: %s", accessTTL)

port, err := strconv.Atoi(portStr)
if err != nil {
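The hunk above makes the mock OIDC server's access-token TTL configurable through `MOCKOIDC_ACCESS_TTL`. A minimal sketch of launching it with the environment variables this changeset references; the address value and the assumption that a `headscale` binary is on `PATH` are illustrative, not taken from the repository:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// Sketch only: run `headscale mockoidc` with the environment variables
	// read by this changeset; client ID/secret and port mirror the
	// integration tests, the address is an assumption for a local run.
	cmd := exec.Command("headscale", "mockoidc")
	cmd.Env = append(os.Environ(),
		"MOCKOIDC_ADDR=localhost",            // assumed address for a local run
		"MOCKOIDC_PORT=10000",                // parsed with strconv.Atoi in mockOIDC()
		"MOCKOIDC_CLIENT_ID=superclient",
		"MOCKOIDC_CLIENT_SECRET=supersecret",
		"MOCKOIDC_ACCESS_TTL=2m",             // parsed with time.ParseDuration in mockOIDC()
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```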
@@ -235,6 +235,17 @@ dns_config:
# Search domains to inject.
domains: []

# Extra DNS records
# so far only A-records are supported (on the tailscale side)
# See https://github.com/juanfont/headscale/blob/main/docs/dns-records.md#Limitations
# extra_records:
#   - name: "grafana.myvpn.example.com"
#     type: "A"
#     value: "100.64.0.3"
#
#   # you can also put it in one line
#   - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }

# Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
# Only works if there is at least a nameserver defined.
magic_dns: true
config.go (18 lines changed)

@@ -408,7 +408,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}

if viper.IsSet("dns_config.restricted_nameservers") {
if len(dnsConfig.Nameservers) > 0 {
if len(dnsConfig.Resolvers) > 0 {
dnsConfig.Routes = make(map[string][]*dnstype.Resolver)
restrictedDNS := viper.GetStringMapStringSlice(
"dns_config.restricted_nameservers",

@@ -440,7 +440,7 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {

if viper.IsSet("dns_config.domains") {
domains := viper.GetStringSlice("dns_config.domains")
if len(dnsConfig.Nameservers) > 0 {
if len(dnsConfig.Resolvers) > 0 {
dnsConfig.Domains = domains
} else if domains != nil {
log.Warn().

@@ -448,6 +448,20 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
}
}

if viper.IsSet("dns_config.extra_records") {
var extraRecords []tailcfg.DNSRecord

err := viper.UnmarshalKey("dns_config.extra_records", &extraRecords)
if err != nil {
log.Error().
Str("func", "getDNSConfig").
Err(err).
Msgf("Could not parse dns_config.extra_records")
}

dnsConfig.ExtraRecords = extraRecords
}

if viper.IsSet("dns_config.magic_dns") {
dnsConfig.Proxied = viper.GetBool("dns_config.magic_dns")
}
@@ -12,6 +12,7 @@ please ask on [Discord](https://discord.gg/c84AZQhmpx) instead of opening an Iss
- [Running headscale on Linux](running-headscale-linux.md)
- [Control headscale remotely](remote-cli.md)
- [Using a Windows client with headscale](windows-client.md)
- [Configuring OIDC](oidc.md)

### References

@@ -29,6 +30,7 @@ written by community members. It is _not_ verified by `headscale` developers.
- [Running headscale in a container](running-headscale-container.md)
- [Running headscale on OpenBSD](running-headscale-openbsd.md)
- [Running headscale behind a reverse proxy](reverse-proxy.md)
- [Set Custom DNS records](dns-records.md)

## Misc
docs/dns-records.md (new file, 83 lines)

@@ -0,0 +1,83 @@
# Setting custom DNS records

## Goal

This page shows how to set custom DNS records with `headscale`'s MagicDNS.
An example use case is to serve apps on the same host via a reverse proxy like NGINX, in this case a Prometheus monitoring stack. This lets you reach the service at "http://grafana.myvpn.example.com" instead of the hostname-and-port combination "http://hostname-in-magic-dns.myvpn.example.com:3000".

## Setup

### 1. Change the configuration

1. Change the `config.yaml` to contain the desired records like so:
```yaml
dns_config:
  ...
  extra_records:
    - name: "prometheus.myvpn.example.com"
      type: "A"
      value: "100.64.0.3"

    - name: "grafana.myvpn.example.com"
      type: "A"
      value: "100.64.0.3"
  ...
```

2. Restart your headscale instance.

Beware of the limitations listed later on!

### 2. Verify that the records are set

You can use a DNS querying tool of your choice on one of your hosts to verify that your newly set records are actually available in MagicDNS, here we used [`dig`](https://man.archlinux.org/man/dig.1.en):

```
$ dig grafana.myvpn.example.com

; <<>> DiG 9.18.10 <<>> grafana.myvpn.example.com
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 44054
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 65494
;; QUESTION SECTION:
;grafana.myvpn.example.com. IN A

;; ANSWER SECTION:
grafana.myvpn.example.com. 593 IN A 100.64.0.3

;; Query time: 0 msec
;; SERVER: 127.0.0.53#53(127.0.0.53) (UDP)
;; WHEN: Sat Dec 31 11:46:55 CET 2022
;; MSG SIZE rcvd: 66
```

### 3. Optional: Setup the reverse proxy

The motivating example here was to be able to access internal monitoring services on the same host without specifying a port:

```
server {
    listen 80;
    listen [::]:80;

    server_name grafana.myvpn.example.com;

    location / {
        proxy_pass http://localhost:3000;
        proxy_set_header Host $http_host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

}
```
## Limitations

[Not all types of records are supported](https://github.com/tailscale/tailscale/blob/main/ipn/ipnlocal/local.go#L2891-L2909); in particular, CNAME records are not supported.
docs/oidc.md (new file, 137 lines)

@@ -0,0 +1,137 @@
# Configuring Headscale to use OIDC authentication

In order to authenticate users through a centralized solution one must enable the OIDC integration.

Known limitations:

- No dynamic ACL support
- OIDC groups cannot be used in ACLs

## Basic configuration

In your `config.yaml`, customize this to your liking:
```yaml
oidc:
  # Block further startup until the OIDC provider is healthy and available
  only_start_if_oidc_is_available: true
  # Specified by your OIDC provider
  issuer: "https://your-oidc.issuer.com/path"
  # Specified/generated by your OIDC provider
  client_id: "your-oidc-client-id"
  client_secret: "your-oidc-client-secret"

  # Customize the scopes used in the OIDC flow (defaults to "openid", "profile" and "email")
  # and add custom query parameters to the Authorize Endpoint request.
  scope: ["openid", "profile", "email", "custom"]
  # Optional: Passed on to the browser login request – used to tweak behaviour for the OIDC provider
  extra_params:
    domain_hint: example.com

  # Optional: List allowed principal domains and/or users. If an authenticated user's domain is not in this list,
  # the authentication request will be rejected.
  allowed_domains:
    - example.com
  # Optional. Note that groups from Keycloak have a leading '/'.
  allowed_groups:
    - /headscale
  # Optional.
  allowed_users:
    - alice@example.com

  # If `strip_email_domain` is set to `true`, the domain part of the username email address will be removed.
  # This will transform `first-name.last-name@example.com` to the namespace `first-name.last-name`.
  # If `strip_email_domain` is set to `false`, the domain part will NOT be removed, resulting in the following
  # namespace: `first-name.last-name.example.com`
  strip_email_domain: true
```
## Azure AD example

In order to integrate Headscale with Azure Active Directory, we'll need to provision an App Registration with the correct scopes and redirect URI. Here with Terraform:

```hcl
resource "azuread_application" "headscale" {
display_name = "Headscale"

sign_in_audience = "AzureADMyOrg"
fallback_public_client_enabled = false

required_resource_access {
// Microsoft Graph
resource_app_id = "00000003-0000-0000-c000-000000000000"

resource_access {
// scope: profile
id = "14dad69e-099b-42c9-810b-d002981feec1"
type = "Scope"
}
resource_access {
// scope: openid
id = "37f7f235-527c-4136-accd-4a02d197296e"
type = "Scope"
}
resource_access {
// scope: email
id = "64a6cdd6-aab1-4aaf-94b8-3cc8405e90d0"
type = "Scope"
}
}
web {
# Points at your running Headscale instance
redirect_uris = ["https://headscale.example.com/oidc/callback"]

implicit_grant {
access_token_issuance_enabled = false
id_token_issuance_enabled = true
}
}

group_membership_claims = ["SecurityGroup"]
optional_claims {
# Expose group memberships
id_token {
name = "groups"
}
}
}

resource "azuread_application_password" "headscale-application-secret" {
display_name = "Headscale Server"
application_object_id = azuread_application.headscale.object_id
}

resource "azuread_service_principal" "headscale" {
application_id = azuread_application.headscale.application_id
}

resource "azuread_service_principal_password" "headscale" {
service_principal_id = azuread_service_principal.headscale.id
end_date_relative = "44640h"
}

output "headscale_client_id" {
value = azuread_application.headscale.application_id
}

output "headscale_client_secret" {
value = azuread_application_password.headscale-application-secret.value
}
```

And in your Headscale `config.yaml`:

```yaml
oidc:
  issuer: "https://login.microsoftonline.com/<tenant-UUID>/v2.0"
  client_id: "<client-id-from-terraform>"
  client_secret: "<client-secret-from-terraform>"

  # Optional: add "groups"
  scope: ["openid", "profile", "email"]
  extra_params:
    # Use your own domain, associated with Azure AD
    domain_hint: example.com
    # Optional: Force the Azure AD account picker
    prompt: select_account
```
@@ -98,3 +98,17 @@ spec:
upgrade_configs:
- upgrade_type: tailscale-control-protocol
```

## Caddy

The following Caddyfile is all that is necessary to use Caddy as a reverse proxy for headscale, in combination with the `config.yaml` specifications above to disable headscale's built-in TLS. Replace values as necessary - `<YOUR_SERVER_NAME>` should be the FQDN at which headscale will be served, and `<IP:PORT>` should be the IP address and port where headscale is running. In most cases, this will be `localhost:8080`.

```
<YOUR_SERVER_NAME> {
    reverse_proxy <IP:PORT>
}
```

Caddy v2 will [automatically](https://caddyserver.com/docs/automatic-https) provision a certificate for your domain/subdomain, force HTTPS, and proxy websockets - no further configuration is necessary.

For a slightly more complex configuration which utilizes Docker containers to manage Caddy, Headscale, and Headscale-UI, [Guru Computing's guide](https://blog.gurucomputing.com.au/smart-vpns-with-headscale/) is an excellent reference.
@@ -176,6 +176,7 @@ func (api headscaleV1APIServer) RegisterMachine(
machine, err := api.h.RegisterMachineFromAuthCallback(
request.GetKey(),
request.GetNamespace(),
nil,
RegisterMethodCLI,
)
if err != nil {
@@ -9,8 +9,10 @@ import (
"log"
"net"
"net/http"
"net/netip"
"strconv"
"testing"
"time"

"github.com/juanfont/headscale"
"github.com/juanfont/headscale/integration/dockertestutil"

@@ -22,7 +24,7 @@ import (
const (
dockerContextPath = "../."
hsicOIDCMockHashLength = 6
oidcServerPort = 10000
defaultAccessTTL = 10 * time.Minute
)

var errStatusCodeNotOK = errors.New("status code not OK")

@@ -50,7 +52,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
"namespace1": len(TailscaleVersions),
}

oidcConfig, err := scenario.runMockOIDC()
oidcConfig, err := scenario.runMockOIDC(defaultAccessTTL)
if err != nil {
t.Errorf("failed to run mock OIDC server: %s", err)
}

@@ -87,20 +89,76 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}

success := 0
success := pingAll(t, allClients, allIps)
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}

func TestOIDCExpireNodes(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

shortAccessTTL := 5 * time.Minute

baseScenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}

t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
scenario := AuthOIDCScenario{
Scenario: baseScenario,
}

spec := map[string]int{
"namespace1": len(TailscaleVersions),
}

oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
if err != nil {
t.Fatalf("failed to run mock OIDC server: %s", err)
}

oidcMap := map[string]string{
"HEADSCALE_OIDC_ISSUER": oidcConfig.Issuer,
"HEADSCALE_OIDC_CLIENT_ID": oidcConfig.ClientID,
"HEADSCALE_OIDC_CLIENT_SECRET": oidcConfig.ClientSecret,
"HEADSCALE_OIDC_STRIP_EMAIL_DOMAIN": fmt.Sprintf("%t", oidcConfig.StripEmaildomain),
}

err = scenario.CreateHeadscaleEnv(
spec,
hsic.WithTestName("oidcexpirenodes"),
hsic.WithConfigEnv(oidcMap),
hsic.WithHostnameAsServerURL(),
)
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}

allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}

success := pingAll(t, allClients, allIps)
t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))

// await all nodes being logged out after OIDC token expiry
scenario.WaitForTailscaleLogout()

err = scenario.Shutdown()
if err != nil {

@@ -143,7 +201,13 @@ func (s *AuthOIDCScenario) CreateHeadscaleEnv(
return nil
}

func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
func (s *AuthOIDCScenario) runMockOIDC(accessTTL time.Duration) (*headscale.OIDCConfig, error) {
port, err := dockertestutil.RandomFreeHostPort()
if err != nil {
log.Fatalf("could not find an open port: %s", err)
}
portNotation := fmt.Sprintf("%d/tcp", port)

hash, _ := headscale.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)

hostname := fmt.Sprintf("hs-oidcmock-%s", hash)

@@ -151,16 +215,17 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
mockOidcOptions := &dockertest.RunOptions{
Name: hostname,
Cmd: []string{"headscale", "mockoidc"},
ExposedPorts: []string{"10000/tcp"},
ExposedPorts: []string{portNotation},
PortBindings: map[docker.Port][]docker.PortBinding{
"10000/tcp": {{HostPort: "10000"}},
docker.Port(portNotation): {{HostPort: strconv.Itoa(port)}},
},
Networks: []*dockertest.Network{s.Scenario.network},
Env: []string{
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
"MOCKOIDC_PORT=10000",
fmt.Sprintf("MOCKOIDC_PORT=%d", port),
"MOCKOIDC_CLIENT_ID=superclient",
"MOCKOIDC_CLIENT_SECRET=supersecret",
fmt.Sprintf("MOCKOIDC_ACCESS_TTL=%s", accessTTL.String()),
},
}

@@ -169,7 +234,7 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
ContextDir: dockerContextPath,
}

err := s.pool.RemoveContainerByName(hostname)
err = s.pool.RemoveContainerByName(hostname)
if err != nil {
return nil, err
}

@@ -184,11 +249,7 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
}

log.Println("Waiting for headscale mock oidc to be ready for tests")
hostEndpoint := fmt.Sprintf(
"%s:%s",
s.mockOIDC.GetIPInNetwork(s.network),
s.mockOIDC.GetPort(fmt.Sprintf("%d/tcp", oidcServerPort)),
)
hostEndpoint := fmt.Sprintf("%s:%d", s.mockOIDC.GetIPInNetwork(s.network), port)

if err := s.pool.Retry(func() error {
oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)

@@ -215,11 +276,11 @@ func (s *AuthOIDCScenario) runMockOIDC() (*headscale.OIDCConfig, error) {
log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)

return &headscale.OIDCConfig{
Issuer: fmt.Sprintf("http://%s/oidc",
net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(oidcServerPort))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
Issuer: fmt.Sprintf("http://%s/oidc", net.JoinHostPort(s.mockOIDC.GetIPInNetwork(s.network), strconv.Itoa(port))),
ClientID: "superclient",
ClientSecret: "supersecret",
StripEmaildomain: true,
OnlyStartIfOIDCIsAvailable: true,
}, nil
}

@@ -292,6 +353,24 @@ func (s *AuthOIDCScenario) runTailscaleUp(
return fmt.Errorf("failed to up tailscale node: %w", errNoNamespaceAvailable)
}

func pingAll(t *testing.T, clients []TailscaleClient, ips []netip.Addr) int {
t.Helper()
success := 0

for _, client := range clients {
for _, ip := range ips {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}

return success
}

func (s *AuthOIDCScenario) Shutdown() error {
err := s.pool.Purge(s.mockOIDC)
if err != nil {
@@ -141,13 +141,13 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
}

for _, client := range allClients {
_, _, err = client.Execute([]string{"tailscale", "logout"})
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}

scenario.waitForTailscaleLogout()
scenario.WaitForTailscaleLogout()

t.Logf("all clients logged out")

@@ -259,22 +259,6 @@ func (s *AuthWebFlowScenario) CreateHeadscaleEnv(
return nil
}

func (s *AuthWebFlowScenario) waitForTailscaleLogout() {
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)

go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()

// TODO(kradalby): error handle this
_ = c.WaitForLogout()
}(client)
}
namespace.syncWaitGroup.Wait()
}
}

func (s *AuthWebFlowScenario) runTailscaleUp(
namespaceStr, loginServer string,
) error {
@@ -11,7 +11,7 @@ type ControlServer interface {
GetEndpoint() string
WaitForReady() error
CreateNamespace(namespace string) error
CreateAuthKey(namespace string) (*v1.PreAuthKey, error)
CreateAuthKey(namespace string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error)
ListMachinesInNamespace(namespace string) ([]*v1.Machine, error)
GetCert() []byte
GetHostname() string
@@ -2,6 +2,7 @@ package dockertestutil

import (
"errors"
"net"

"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"

@@ -60,3 +61,20 @@ func AddContainerToNetwork(

return nil
}

// RandomFreeHostPort asks the kernel for a free open port that is ready to use.
// (from https://github.com/phayes/freeport)
func RandomFreeHostPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}

listener, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer listener.Close()
//nolint:forcetypeassert
return listener.Addr().(*net.TCPAddr).Port, nil
}
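`RandomFreeHostPort` is consumed by `runMockOIDC` earlier in this changeset. A minimal usage sketch under that assumption, as a standalone program with shortened error handling:

```go
package main

import (
	"fmt"
	"log"

	"github.com/juanfont/headscale/integration/dockertestutil"
)

func main() {
	// Reserve a free host port before starting a container that needs a
	// deterministic port binding, as runMockOIDC does in this changeset.
	port, err := dockertestutil.RandomFreeHostPort()
	if err != nil {
		log.Fatalf("could not find an open port: %s", err)
	}

	// "10000/tcp"-style notation is what dockertest port bindings expect.
	fmt.Printf("binding mock OIDC server to %d/tcp\n", port)
}
```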
@@ -2,6 +2,7 @@ package integration

import (
"fmt"
"net/netip"
"strings"
"testing"
"time"

@@ -66,6 +67,239 @@ func TestPingAllByIP(t *testing.T) {
}
}

func TestAuthKeyLogoutAndRelogin(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}

spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}

err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}

allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}

clientIPs := make(map[TailscaleClient][]netip.Addr)
for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
}
clientIPs[client] = ips
}

for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}

scenario.WaitForTailscaleLogout()

t.Logf("all clients logged out")

headscale, err := scenario.Headscale()
if err != nil {
t.Errorf("failed to get headscale server: %s", err)
}

for namespaceName := range spec {
key, err := scenario.CreatePreAuthKey(namespaceName, true, false)
if err != nil {
t.Errorf("failed to create pre-auth key for namespace %s: %s", namespaceName, err)
}

err = scenario.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Errorf("failed to run tailscale up for namespace %s: %s", namespaceName, err)
}
}

err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}

allClients, err = scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}

t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

for _, client := range allClients {
ips, err := client.IPs()
if err != nil {
t.Errorf("failed to get IPs for client %s: %s", client.Hostname(), err)
}

// lets check if the IPs are the same
if len(ips) != len(clientIPs[client]) {
t.Errorf("IPs changed for client %s", client.Hostname())
}

for _, ip := range ips {
found := false
for _, oldIP := range clientIPs[client] {
if ip == oldIP {
found = true

break
}
}

if !found {
t.Errorf("IPs changed for client %s. Used to be %v now %v", client.Hostname(), clientIPs[client], ips)
}
}
}

t.Logf("all clients IPs are the same")

err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}

func TestEphemeral(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario, err := NewScenario()
if err != nil {
t.Errorf("failed to create scenario: %s", err)
}

spec := map[string]int{
"namespace1": len(TailscaleVersions),
"namespace2": len(TailscaleVersions),
}

headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral"))
if err != nil {
t.Errorf("failed to create headscale environment: %s", err)
}

for namespaceName, clientCount := range spec {
err = scenario.CreateNamespace(namespaceName)
if err != nil {
t.Errorf("failed to create namespace %s: %s", namespaceName, err)
}

err = scenario.CreateTailscaleNodesInNamespace(namespaceName, "all", clientCount, []tsic.Option{}...)
if err != nil {
t.Errorf("failed to create tailscale nodes in namespace %s: %s", namespaceName, err)
}

key, err := scenario.CreatePreAuthKey(namespaceName, true, true)
if err != nil {
t.Errorf("failed to create pre-auth key for namespace %s: %s", namespaceName, err)
}

err = scenario.RunTailscaleUp(namespaceName, headscale.GetEndpoint(), key.GetKey())
if err != nil {
t.Errorf("failed to run tailscale up for namespace %s: %s", namespaceName, err)
}
}

err = scenario.WaitForTailscaleSync()
if err != nil {
t.Errorf("failed wait for tailscale clients to be in sync: %s", err)
}

allClients, err := scenario.ListTailscaleClients()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

allIps, err := scenario.ListTailscaleClientsIPs()
if err != nil {
t.Errorf("failed to get clients: %s", err)
}

success := 0
for _, client := range allClients {
for _, ip := range allIps {
err := client.Ping(ip.String())
if err != nil {
t.Errorf("failed to ping %s from %s: %s", ip, client.Hostname(), err)
} else {
success++
}
}
}

t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

for _, client := range allClients {
err := client.Logout()
if err != nil {
t.Errorf("failed to logout client %s: %s", client.Hostname(), err)
}
}

scenario.WaitForTailscaleLogout()

t.Logf("all clients logged out")

for namespaceName := range spec {
machines, err := headscale.ListMachinesInNamespace(namespaceName)
if err != nil {
log.Error().
Err(err).
Str("namespace", namespaceName).
Msg("Error listing machines in namespace")

return
}

if len(machines) != 0 {
t.Errorf("expected no machines, got %d in namespace %s", len(machines), namespaceName)
}
}

err = scenario.Shutdown()
if err != nil {
t.Errorf("failed to tear down scenario: %s", err)
}
}

func TestPingAllByHostname(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
@@ -316,6 +316,8 @@ func (t *HeadscaleInContainer) CreateNamespace(

func (t *HeadscaleInContainer) CreateAuthKey(
namespace string,
reusable bool,
ephemeral bool,
) (*v1.PreAuthKey, error) {
command := []string{
"headscale",

@@ -323,13 +325,20 @@ func (t *HeadscaleInContainer) CreateAuthKey(
namespace,
"preauthkeys",
"create",
"--reusable",
"--expiration",
"24h",
"--output",
"json",
}

if reusable {
command = append(command, "--reusable")
}

if ephemeral {
command = append(command, "--ephemeral")
}

result, _, err := dockertestutil.ExecuteCommand(
t.container,
command,
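`CreateAuthKey` now takes explicit `reusable` and `ephemeral` flags, which are appended to the `headscale preauthkeys create` command built above. A rough sketch of how a test in the integration package might exercise the new signature; this function is illustrative and not part of the changeset:

```go
package integration

import "testing"

// Sketch only: exercises the new CreatePreAuthKey signature, assuming the
// integration package's NewScenario and IntegrationSkip helpers shown
// elsewhere in this changeset.
func TestPreAuthKeyFlagsSketch(t *testing.T) {
	IntegrationSkip(t)

	scenario, err := NewScenario()
	if err != nil {
		t.Fatalf("failed to create scenario: %s", err)
	}

	// Reusable, non-ephemeral: the kind of key the relogin test uses.
	if _, err := scenario.CreatePreAuthKey("namespace1", true, false); err != nil {
		t.Errorf("failed to create pre-auth key: %s", err)
	}

	// Reusable and ephemeral: nodes registered with it are removed on logout,
	// which is what TestEphemeral asserts.
	if _, err := scenario.CreatePreAuthKey("namespace1", true, true); err != nil {
		t.Errorf("failed to create pre-auth key: %s", err)
	}

	if err := scenario.Shutdown(); err != nil {
		t.Errorf("failed to tear down scenario: %s", err)
	}
}
```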
@@ -43,11 +43,11 @@ var (
"1.24.2",
"1.22.2",
"1.20.4",
"1.18.2",
}

// tailscaleVersionsUnavailable = []string{
// // These versions seem to fail when fetching from apt.
// "1.18.2",
// "1.16.2",
// "1.14.6",
// "1.12.4",

@@ -194,9 +194,9 @@ func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
return headscale, nil
}

func (s *Scenario) CreatePreAuthKey(namespace string) (*v1.PreAuthKey, error) {
func (s *Scenario) CreatePreAuthKey(namespace string, reusable bool, ephemeral bool) (*v1.PreAuthKey, error) {
if headscale, err := s.Headscale(); err == nil {
key, err := headscale.CreateAuthKey(namespace)
key, err := headscale.CreateAuthKey(namespace, reusable, ephemeral)
if err != nil {
return nil, fmt.Errorf("failed to create namespace: %w", err)
}

@@ -368,7 +368,7 @@ func (s *Scenario) CreateHeadscaleEnv(
return err
}

key, err := s.CreatePreAuthKey(namespaceName)
key, err := s.CreatePreAuthKey(namespaceName, true, false)
if err != nil {
return err
}

@@ -469,3 +469,19 @@ func (s *Scenario) ListTailscaleClientsFQDNs(namespaces ...string) ([]string, er

return allFQDNs, nil
}

func (s *Scenario) WaitForTailscaleLogout() {
for _, namespace := range s.namespaces {
for _, client := range namespace.Clients {
namespace.syncWaitGroup.Add(1)

go func(c TailscaleClient) {
defer namespace.syncWaitGroup.Done()

// TODO(kradalby): error handle this
_ = c.WaitForLogout()
}(client)
}
namespace.syncWaitGroup.Wait()
}
}
@@ -62,7 +62,7 @@ func TestHeadscale(t *testing.T) {
})

t.Run("create-auth-key", func(t *testing.T) {
_, err := scenario.CreatePreAuthKey(namespace)
_, err := scenario.CreatePreAuthKey(namespace, true, false)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}

@@ -166,7 +166,7 @@ func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
})

t.Run("join-headscale", func(t *testing.T) {
key, err := scenario.CreatePreAuthKey(namespace)
key, err := scenario.CreatePreAuthKey(namespace, true, false)
if err != nil {
t.Errorf("failed to create preauthkey: %s", err)
}
@@ -15,6 +15,7 @@ type TailscaleClient interface {
Execute(command []string) (string, string, error)
Up(loginServer, authKey string) error
UpWithLoginURL(loginServer string) (*url.URL, error)
Logout() error
IPs() ([]netip.Addr, error)
FQDN() (string, error)
Status() (*ipnstate.Status, error)

@@ -270,6 +270,15 @@ func (t *TailscaleInContainer) UpWithLoginURL(
return loginURL, nil
}

func (t *TailscaleInContainer) Logout() error {
_, _, err := t.Execute([]string{"tailscale", "logout"})
if err != nil {
return err
}

return nil
}

func (t *TailscaleInContainer) IPs() ([]netip.Addr, error) {
if t.ips != nil && len(t.ips) != 0 {
return t.ips, nil
machine.go (31 lines changed)

@@ -153,9 +153,15 @@ func (machine *Machine) isOnline() bool {
return machine.LastSeen.After(time.Now().Add(-keepAliveInterval))
}

// isEphemeral returns if the machine is registered as an Ephemeral node.
// https://tailscale.com/kb/1111/ephemeral-nodes/
func (machine *Machine) isEphemeral() bool {
return machine.AuthKey != nil && machine.AuthKey.Ephemeral
}

func containsAddresses(inputs []string, addrs []string) bool {
for _, addr := range addrs {
if contains(inputs, addr) {
if containsStr(inputs, addr) {
return true
}
}

@@ -386,7 +392,7 @@ func (h *Headscale) GetMachineByGivenName(namespace string, givenName string) (*
// GetMachineByID finds a Machine by ID and returns the Machine struct.
func (h *Headscale) GetMachineByID(id uint64) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
if result := h.db.Preload("AuthKey").Preload("Namespace").Find(&Machine{ID: id}).First(&m); result.Error != nil {
return nil, result.Error
}

@@ -398,7 +404,7 @@ func (h *Headscale) GetMachineByMachineKey(
machineKey key.MachinePublic,
) (*Machine, error) {
m := Machine{}
if result := h.db.Preload("Namespace").First(&m, "machine_key = ?", MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&m, "machine_key = ?", MachinePublicKeyStripPrefix(machineKey)); result.Error != nil {
return nil, result.Error
}

@@ -410,7 +416,7 @@ func (h *Headscale) GetMachineByNodeKey(
nodeKey key.NodePublic,
) (*Machine, error) {
machine := Machine{}
if result := h.db.Preload("Namespace").First(&machine, "node_key = ?",
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&machine, "node_key = ?",
NodePublicKeyStripPrefix(nodeKey)); result.Error != nil {
return nil, result.Error
}

@@ -423,7 +429,7 @@ func (h *Headscale) GetMachineByAnyKey(
machineKey key.MachinePublic, nodeKey key.NodePublic, oldNodeKey key.NodePublic,
) (*Machine, error) {
machine := Machine{}
if result := h.db.Preload("Namespace").First(&machine, "machine_key = ? OR node_key = ? OR node_key = ?",
if result := h.db.Preload("AuthKey").Preload("Namespace").First(&machine, "machine_key = ? OR node_key = ? OR node_key = ?",
MachinePublicKeyStripPrefix(machineKey),
NodePublicKeyStripPrefix(nodeKey),
NodePublicKeyStripPrefix(oldNodeKey)); result.Error != nil {

@@ -683,7 +689,15 @@ func (h *Headscale) toNode(
}
primaryPrefixes := Routes(primaryRoutes).toPrefixes()

allowedIPs = append(allowedIPs, primaryPrefixes...)
machineRoutes, err := h.GetMachineRoutes(&machine)
if err != nil {
return nil, err
}
for _, route := range machineRoutes {
if route.Enabled && (route.IsPrimary || route.isExitRoute()) {
allowedIPs = append(allowedIPs, netip.Prefix(route.Prefix))
}
}

var derp string
if machine.HostInfo.NetInfo != nil {

@@ -844,6 +858,7 @@ func getTags(
func (h *Headscale) RegisterMachineFromAuthCallback(
nodeKeyStr string,
namespaceName string,
machineExpiry *time.Time,
registrationMethod string,
) (*Machine, error) {
nodeKey := key.NodePublic{}

@@ -877,6 +892,10 @@ func (h *Headscale) RegisterMachineFromAuthCallback(
registrationMachine.NamespaceID = namespace.ID
registrationMachine.RegisterMethod = registrationMethod

if machineExpiry != nil {
registrationMachine.Expiry = machineExpiry
}

machine, err := h.RegisterMachine(
registrationMachine,
)
oidc.go (4 lines changed)

@@ -236,7 +236,7 @@ func (h *Headscale) OIDCCallback(
return
}

if err := h.registerMachineForOIDCCallback(writer, namespace, nodeKey); err != nil {
if err := h.registerMachineForOIDCCallback(writer, namespace, nodeKey, idToken.Expiry); err != nil {
return
}

@@ -679,10 +679,12 @@ func (h *Headscale) registerMachineForOIDCCallback(
writer http.ResponseWriter,
namespace *Namespace,
nodeKey *key.NodePublic,
expiry time.Time,
) error {
if _, err := h.RegisterMachineFromAuthCallback(
nodeKey.String(),
namespace.Name,
&expiry,
RegisterMethodOIDC,
); err != nil {
log.Error().
@@ -255,7 +255,7 @@ func (h *Headscale) ApplePlatformConfig(
writer.Header().Set("Content-Type", "text/plain; charset=utf-8")
writer.WriteHeader(http.StatusBadRequest)
_, err := writer.Write(
[]byte("Invalid platform, only ios and macos is supported"),
[]byte("Invalid platform. Only ios, macos-app-store and macos-standalone are supported"),
)
if err != nil {
log.Error().
@@ -622,6 +622,20 @@ func (h *Headscale) handleMachineLogOutCommon(
Caller().
Err(err).
Msg("Failed to write response")

return
}

if machine.isEphemeral() {
err = h.HardDeleteMachine(&machine)
if err != nil {
log.Error().
Err(err).
Str("machine", machine.Hostname).
Msg("Cannot delete ephemeral machine from the database")
}

return
}

log.Info().
@@ -664,7 +664,11 @@ func (h *Headscale) scheduledPollWorker(
Str("machine", machine.Hostname).
Bool("noise", isNoise).
Msg("Sending keepalive")
keepAliveChan <- data
select {
case keepAliveChan <- data:
case <-ctx.Done():
return
}

case <-updateCheckerTicker.C:
log.Debug().

@@ -674,7 +678,11 @@ func (h *Headscale) scheduledPollWorker(
Msg("Sending update request")
updateRequestsFromNode.WithLabelValues(machine.Namespace.Name, machine.Hostname, "scheduled-update").
Inc()
updateChan <- struct{}{}
select {
case updateChan <- struct{}{}:
case <-ctx.Done():
return
}
}
}
}
routes_test.go (100 lines changed)

@@ -6,6 +6,7 @@ import (

"gopkg.in/check.v1"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)

func (s *Suite) TestGetRoutes(c *check.C) {

@@ -352,3 +353,102 @@ func (s *Suite) TestSubnetFailover(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(len(routes), check.Equals, 2)
}

// TestAllowedIPRoutes tests that the AllowedIPs are correctly set for a node,
// including both the primary routes the node is responsible for, and the
// exit node routes if enabled.
func (s *Suite) TestAllowedIPRoutes(c *check.C) {
namespace, err := app.CreateNamespace("test")
c.Assert(err, check.IsNil)

pak, err := app.CreatePreAuthKey(namespace.Name, false, false, nil, nil)
c.Assert(err, check.IsNil)

_, err = app.GetMachine("test", "test_enable_route_machine")
c.Assert(err, check.NotNil)

prefix, err := netip.ParsePrefix(
"10.0.0.0/24",
)
c.Assert(err, check.IsNil)

prefix2, err := netip.ParsePrefix(
"150.0.10.0/25",
)
c.Assert(err, check.IsNil)

prefixExitNodeV4, err := netip.ParsePrefix(
"0.0.0.0/0",
)
c.Assert(err, check.IsNil)

prefixExitNodeV6, err := netip.ParsePrefix(
"::/0",
)
c.Assert(err, check.IsNil)

hostInfo1 := tailcfg.Hostinfo{
RoutableIPs: []netip.Prefix{prefix, prefix2, prefixExitNodeV4, prefixExitNodeV6},
}

nodeKey := key.NewNode()
discoKey := key.NewDisco()
machineKey := key.NewMachine()

now := time.Now()
machine1 := Machine{
ID: 1,
MachineKey: MachinePublicKeyStripPrefix(machineKey.Public()),
NodeKey: NodePublicKeyStripPrefix(nodeKey.Public()),
DiscoKey: DiscoPublicKeyStripPrefix(discoKey.Public()),
Hostname: "test_enable_route_machine",
NamespaceID: namespace.ID,
RegisterMethod: RegisterMethodAuthKey,
AuthKeyID: uint(pak.ID),
HostInfo: HostInfo(hostInfo1),
LastSeen: &now,
}
app.db.Save(&machine1)

err = app.processMachineRoutes(&machine1)
c.Assert(err, check.IsNil)

err = app.EnableRoutes(&machine1, prefix.String())
c.Assert(err, check.IsNil)

// We do not enable this one on purpose to test that it is not enabled
// err = app.EnableRoutes(&machine1, prefix2.String())
// c.Assert(err, check.IsNil)

err = app.EnableRoutes(&machine1, prefixExitNodeV4.String())
c.Assert(err, check.IsNil)

err = app.EnableRoutes(&machine1, prefixExitNodeV6.String())
c.Assert(err, check.IsNil)

err = app.handlePrimarySubnetFailover()
c.Assert(err, check.IsNil)

enabledRoutes1, err := app.GetEnabledRoutes(&machine1)
c.Assert(err, check.IsNil)
c.Assert(len(enabledRoutes1), check.Equals, 3)

peer, err := app.toNode(machine1, "headscale.net", nil)
c.Assert(err, check.IsNil)

c.Assert(len(peer.AllowedIPs), check.Equals, 3)

foundExitNodeV4 := false
foundExitNodeV6 := false
for _, allowedIP := range peer.AllowedIPs {
if allowedIP == prefixExitNodeV4 {
foundExitNodeV4 = true
}
if allowedIP == prefixExitNodeV6 {
foundExitNodeV6 = true
}
}

c.Assert(foundExitNodeV4, check.Equals, true)
c.Assert(foundExitNodeV6, check.Equals, true)
}
@@ -24,7 +24,7 @@
<li>
ALT + Click the Tailscale icon in the menu and hover over the Debug menu
</li>
<li>Under "Custm Login Server", select "Add Account..."</li>
<li>Under "Custom Login Server", select "Add Account..."</li>
<li>
Enter "{{.URL}}" of the headscale instance and press "Add Account"
</li>

@@ -68,7 +68,16 @@
<!--
<pre><code>curl {{.URL}}/apple/ios</code></pre>
-->
<pre><code>curl {{.URL}}/apple/macos</code></pre>
<ul>
<li>
for app store client:
<code>curl {{.URL}}/apple/macos-app-store</code>
</li>
<li>
for standalone client:
<code>curl {{.URL}}/apple/macos-standalone</code>
</li>
</ul>

<h2>Profiles</h2>
utils.go (10 lines changed)

@@ -269,6 +269,16 @@ func stringToIPPrefix(prefixes []string) ([]netip.Prefix, error) {
return result, nil
}

func containsStr(ts []string, t string) bool {
for _, v := range ts {
if v == t {
return true
}
}

return false
}

func contains[T string | netip.Prefix](ts []T, t T) bool {
for _, v := range ts {
if reflect.DeepEqual(v, t) {