package integration
import (
	"context"
	"encoding/json"
	"fmt"
	"net/netip"
	"strings"
	"testing"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/rs/zerolog/log"
	"github.com/samber/lo"
	"github.com/stretchr/testify/assert"
	"golang.org/x/sync/errgroup"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/types/key"
)
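
// TestPingAllByIP creates two users with a set of clients each, waits for the
// network to settle, and verifies that every node can ping every other node by
// IP. The scenario uses headscale's embedded DERP server and random IP
// allocation.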
func TestPingAllByIP(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	// TODO(kradalby): it does not look like the user thing works, only the
	// second one gets created? Maybe only when there are many?
	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec,
		[]tsic.Option{},
		hsic.WithTestName("pingallbyip"),
		hsic.WithEmbeddedDERPServerOnly(),
		hsic.WithTLS(),
		hsic.WithHostnameAsServerURL(),
		hsic.WithIPAllocationStrategy(types.IPAllocationStrategyRandom),
	)
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
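
// TestPingAllByIPPublicDERP is the same as TestPingAllByIP, but it does not
// enable the embedded DERP server and instead relies on the default (public)
// DERP configuration.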
func TestPingAllByIPPublicDERP(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec,
		[]tsic.Option{},
		hsic.WithTestName("pingallbyippubderp"),
	)
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
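
// TestAuthKeyLogoutAndRelogin logs every client out, registers them again with
// newly created pre-auth keys, and verifies both connectivity and that each
// node keeps the IP addresses it had before the logout.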
func TestAuthKeyLogoutAndRelogin(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyip"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	clientIPs := make(map[TailscaleClient][]netip.Addr)
	for _, client := range allClients {
		ips, err := client.IPs()
		if err != nil {
			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
		}
		clientIPs[client] = ips
	}

	for _, client := range allClients {
		err := client.Logout()
		if err != nil {
			t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
		}
	}

	err = scenario.WaitForTailscaleLogout()
	assertNoErrLogout(t, err)

	t.Logf("all clients logged out")

	headscale, err := scenario.Headscale()
	assertNoErrGetHeadscale(t, err)

	for userName := range spec {
		key, err := scenario.CreatePreAuthKey(userName, true, false)
		if err != nil {
			t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
		}

		err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
		if err != nil {
			t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
		}
	}

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allClients, err = scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		ips, err := client.IPs()
		if err != nil {
			t.Fatalf("failed to get IPs for client %s: %s", client.Hostname(), err)
		}

		// Check that the IPs are the same as before the logout/relogin cycle.
		if len(ips) != len(clientIPs[client]) {
			t.Fatalf("IPs changed for client %s", client.Hostname())
		}

		for _, ip := range ips {
			found := false
			for _, oldIP := range clientIPs[client] {
				if ip == oldIP {
					found = true
					break
				}
			}

			if !found {
				t.Fatalf(
					"IPs changed for client %s. Used to be %v now %v",
					client.Hostname(),
					clientIPs[client],
					ips,
				)
			}
		}
	}
}
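
// TestEphemeral registers nodes with ephemeral pre-auth keys, verifies
// connectivity, and checks that the nodes are removed from headscale after
// they log out.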
func TestEphemeral(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	headscale, err := scenario.Headscale(hsic.WithTestName("ephemeral"))
	assertNoErrHeadscaleEnv(t, err)

	for userName, clientCount := range spec {
		err = scenario.CreateUser(userName)
		if err != nil {
			t.Fatalf("failed to create user %s: %s", userName, err)
		}

		err = scenario.CreateTailscaleNodesInUser(userName, "all", clientCount, []tsic.Option{}...)
		if err != nil {
			t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
		}

		key, err := scenario.CreatePreAuthKey(userName, true, true)
		if err != nil {
			t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
		}

		err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
		if err != nil {
			t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
		}
	}

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		err := client.Logout()
		if err != nil {
			t.Fatalf("failed to logout client %s: %s", client.Hostname(), err)
		}
	}

	err = scenario.WaitForTailscaleLogout()
	assertNoErrLogout(t, err)

	t.Logf("all clients logged out")

	for userName := range spec {
		nodes, err := headscale.ListNodesInUser(userName)
		if err != nil {
			log.Error().
				Err(err).
				Str("user", userName).
				Msg("Error listing nodes in user")

			return
		}

		if len(nodes) != 0 {
			t.Fatalf("expected no nodes, got %d in user %s", len(nodes), userName)
		}
	}
}

// TestEphemeral2006DeletedTooQuickly verifies that ephemeral nodes are not
// deleted by accident if they are still online and active.
func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	headscale, err := scenario.Headscale(
		hsic.WithTestName("ephemeral2006"),
		hsic.WithConfigEnv(map[string]string{
			"HEADSCALE_EPHEMERAL_NODE_INACTIVITY_TIMEOUT": "1m6s",
		}),
	)
	assertNoErrHeadscaleEnv(t, err)

	for userName, clientCount := range spec {
		err = scenario.CreateUser(userName)
		if err != nil {
			t.Fatalf("failed to create user %s: %s", userName, err)
		}

		err = scenario.CreateTailscaleNodesInUser(userName, "all", clientCount, []tsic.Option{}...)
		if err != nil {
			t.Fatalf("failed to create tailscale nodes in user %s: %s", userName, err)
		}

		key, err := scenario.CreatePreAuthKey(userName, true, true)
		if err != nil {
			t.Fatalf("failed to create pre-auth key for user %s: %s", userName, err)
		}

		err = scenario.RunTailscaleUp(userName, headscale.GetEndpoint(), key.GetKey())
		if err != nil {
			t.Fatalf("failed to run tailscale up for user %s: %s", userName, err)
		}
	}

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	// All ephemeral nodes should be online and reachable.
	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	// Take down all clients, this should start an expiry timer for each.
	for _, client := range allClients {
		err := client.Down()
		if err != nil {
			t.Fatalf("failed to take down client %s: %s", client.Hostname(), err)
		}
	}

	// Wait a bit and bring up the clients again before the expiry
	// time of the ephemeral nodes.
	// Nodes should be able to reconnect and work fine.
	time.Sleep(30 * time.Second)

	for _, client := range allClients {
		err := client.Up()
		if err != nil {
			t.Fatalf("failed to bring up client %s: %s", client.Hostname(), err)
		}
	}

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	success = pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	// Take down all clients, this should start an expiry timer for each.
	for _, client := range allClients {
		err := client.Down()
		if err != nil {
			t.Fatalf("failed to take down client %s: %s", client.Hostname(), err)
		}
	}

	// This time wait for all of the nodes to expire and check that they are no
	// longer registered.
	time.Sleep(3 * time.Minute)

	for userName := range spec {
		nodes, err := headscale.ListNodesInUser(userName)
		if err != nil {
			log.Error().
				Err(err).
				Str("user", userName).
				Msg("Error listing nodes in user")

			return
		}

		if len(nodes) != 0 {
			t.Fatalf("expected no nodes, got %d in user %s", len(nodes), userName)
		}
	}
}
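
// TestPingAllByHostname verifies that every node can ping every other node by
// its fully qualified hostname.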
func TestPingAllByHostname(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user3": len(MustTestVersions),
		"user4": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("pingallbyname"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allHostnames, err := scenario.ListTailscaleClientsFQDNs()
	assertNoErrListFQDN(t, err)

	success := pingAllHelper(t, allClients, allHostnames)

	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allClients))
}
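
// TestTaildrop sends a file from every node to each of its peers with
// "tailscale file cp" and verifies that the files arrive with
// "tailscale file get".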
// If subtests are parallel, then they will start before setup is run.
// This might mean we approach setup slightly wrong, but for now, ignore
// the linter.
// nolint:tparallel
func TestTaildrop(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	retry := func(times int, sleepInterval time.Duration, doWork func() error) error {
		var err error
		for attempts := 0; attempts < times; attempts++ {
			err = doWork()
			if err == nil {
				return nil
			}

			time.Sleep(sleepInterval)
		}

		return err
	}

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"taildrop": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("taildrop"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// This will essentially fetch and cache all the FQDNs
	_, err = scenario.ListTailscaleClientsFQDNs()
	assertNoErrListFQDN(t, err)

	for _, client := range allClients {
		if !strings.Contains(client.Hostname(), "head") {
			command := []string{"apk", "add", "curl"}
			_, _, err := client.Execute(command)
			if err != nil {
				t.Fatalf("failed to install curl on %s, err: %s", client.Hostname(), err)
			}
		}

		curlCommand := []string{
			"curl",
			"--unix-socket",
			"/var/run/tailscale/tailscaled.sock",
			"http://local-tailscaled.sock/localapi/v0/file-targets",
		}
		err = retry(10, 1*time.Second, func() error {
			result, _, err := client.Execute(curlCommand)
			if err != nil {
				return err
			}

			var fts []apitype.FileTarget
			err = json.Unmarshal([]byte(result), &fts)
			if err != nil {
				return err
			}

			if len(fts) != len(allClients)-1 {
				ftStr := fmt.Sprintf("FileTargets for %s:\n", client.Hostname())
				for _, ft := range fts {
					ftStr += fmt.Sprintf("\t%s\n", ft.Node.Name)
				}

				return fmt.Errorf(
					"client %s does not have all its peers as FileTargets, got %d, want: %d\n%s",
					client.Hostname(),
					len(fts),
					len(allClients)-1,
					ftStr,
				)
			}

			return err
		})
		if err != nil {
			t.Errorf(
				"failed to query localapi for filetarget on %s, err: %s",
				client.Hostname(),
				err,
			)
		}
	}

	for _, client := range allClients {
		command := []string{"touch", fmt.Sprintf("/tmp/file_from_%s", client.Hostname())}

		if _, _, err := client.Execute(command); err != nil {
			t.Fatalf("failed to create taildrop file on %s, err: %s", client.Hostname(), err)
		}

		for _, peer := range allClients {
			if client.Hostname() == peer.Hostname() {
				continue
			}

			// It is safe to ignore this error as we handled it when caching it
			peerFQDN, _ := peer.FQDN()

			t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
				command := []string{
					"tailscale", "file", "cp",
					fmt.Sprintf("/tmp/file_from_%s", client.Hostname()),
					fmt.Sprintf("%s:", peerFQDN),
				}

				err := retry(10, 1*time.Second, func() error {
					t.Logf(
						"Sending file from %s to %s\n",
						client.Hostname(),
						peer.Hostname(),
					)
					_, _, err := client.Execute(command)

					return err
				})
				if err != nil {
					t.Fatalf(
						"failed to send taildrop file on %s with command %q, err: %s",
						client.Hostname(),
						strings.Join(command, " "),
						err,
					)
				}
			})
		}
	}

	for _, client := range allClients {
		command := []string{
			"tailscale", "file",
			"get",
			"/tmp/",
		}
		if _, _, err := client.Execute(command); err != nil {
			t.Fatalf("failed to get taildrop file on %s, err: %s", client.Hostname(), err)
		}

		for _, peer := range allClients {
			if client.Hostname() == peer.Hostname() {
				continue
			}

			t.Run(fmt.Sprintf("%s-%s", client.Hostname(), peer.Hostname()), func(t *testing.T) {
				command := []string{
					"ls",
					fmt.Sprintf("/tmp/file_from_%s", peer.Hostname()),
				}
				log.Printf(
					"Checking file in %s from %s\n",
					client.Hostname(),
					peer.Hostname(),
				)

				result, _, err := client.Execute(command)
				assertNoErrf(t, "failed to execute command to ls taildrop: %s", err)

				log.Printf("Result for %s: %s\n", peer.Hostname(), result)
				if fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()) != result {
					t.Fatalf(
						"taildrop result is not correct %s, wanted %s",
						result,
						fmt.Sprintf("/tmp/file_from_%s\n", peer.Hostname()),
					)
				}
			})
		}
	}
}
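
// TestExpireNode expires a single node via the headscale CLI and verifies that
// all peers see it as expired and that the expired node itself ends up in the
// NeedsLogin state.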
func TestExpireNode(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("expirenode"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		status, err := client.Status()
		assertNoErr(t, err)

		// Assert that we have the original count - self
		assert.Len(t, status.Peers(), spec["user1"]-1)
	}

	headscale, err := scenario.Headscale()
	assertNoErr(t, err)

	// TODO(kradalby): This is Headscale specific and would not play nicely
	// with other implementations of the ControlServer interface
	result, err := headscale.Execute([]string{
		"headscale", "nodes", "expire", "--identifier", "1", "--output", "json",
	})
	assertNoErr(t, err)

	var node v1.Node
	err = json.Unmarshal([]byte(result), &node)
	assertNoErr(t, err)

	var expiredNodeKey key.NodePublic
	err = expiredNodeKey.UnmarshalText([]byte(node.GetNodeKey()))
	assertNoErr(t, err)

	t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())

	time.Sleep(2 * time.Minute)

	now := time.Now()

	// Verify that the expired node has been marked as expired in all peers' lists.
	for _, client := range allClients {
		status, err := client.Status()
		assertNoErr(t, err)

		if client.Hostname() != node.GetName() {
			t.Logf("available peers of %s: %v", client.Hostname(), status.Peers())

			// Ensures that the node is present, and that it is expired.
			if peerStatus, ok := status.Peer[expiredNodeKey]; ok {
				assertNotNil(t, peerStatus.Expired)
				assert.NotNil(t, peerStatus.KeyExpiry)

				t.Logf(
					"node %q should have a key expire before %s, was %s",
					peerStatus.HostName,
					now.String(),
					peerStatus.KeyExpiry,
				)
				if peerStatus.KeyExpiry != nil {
					assert.Truef(
						t,
						peerStatus.KeyExpiry.Before(now),
						"node %q should have a key expire before %s, was %s",
						peerStatus.HostName,
						now.String(),
						peerStatus.KeyExpiry,
					)
				}

				assert.Truef(
					t,
					peerStatus.Expired,
					"node %q should be expired, expired is %v",
					peerStatus.HostName,
					peerStatus.Expired,
				)

				_, stderr, _ := client.Execute([]string{"tailscale", "ping", node.GetName()})
				if !strings.Contains(stderr, "node key has expired") {
					t.Errorf(
						"expected to be unable to ping expired host %q from %q",
						node.GetName(),
						client.Hostname(),
					)
				}
			} else {
				t.Errorf("failed to find node %q with nodekey (%s) in mapresponse, should be present even if it is expired", node.GetName(), expiredNodeKey)
			}
		} else {
			if status.Self.KeyExpiry != nil {
				assert.Truef(
					t,
					status.Self.KeyExpiry.Before(now),
					"node %q should have a key expire before %s, was %s",
					status.Self.HostName,
					now.String(),
					status.Self.KeyExpiry,
				)
			}

			// NeedsLogin means that the node has understood that it is no longer
			// valid.
			assert.Equalf(t, "NeedsLogin", status.BackendState, "checking node %q", status.Self.HostName)
		}
	}
}
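
// TestNodeOnlineStatus keeps the network running for roughly twelve minutes
// and continuously checks that headscale and every client report all nodes as
// online (see #1561).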
func TestNodeOnlineStatus(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	spec := map[string]int{
		"user1": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec, []tsic.Option{}, hsic.WithTestName("online"))
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("before expire: %d successful pings out of %d", success, len(allClients)*len(allIps))

	for _, client := range allClients {
		status, err := client.Status()
		assertNoErr(t, err)

		// Assert that we have the original count - self
		assert.Len(t, status.Peers(), len(MustTestVersions)-1)
	}

	headscale, err := scenario.Headscale()
	assertNoErr(t, err)

	// Duration is chosen arbitrarily, 10m is reported in #1561
	testDuration := 12 * time.Minute
	start := time.Now()
	end := start.Add(testDuration)

	log.Printf("Starting online test from %v to %v", start, end)

	for {
		// Let the test run continuously for X minutes to verify that all
		// nodes stay connected and have the expected status over time.
		if end.Before(time.Now()) {
			return
		}

		result, err := headscale.Execute([]string{
			"headscale", "nodes", "list", "--output", "json",
		})
		assertNoErr(t, err)

		var nodes []*v1.Node
		err = json.Unmarshal([]byte(result), &nodes)
		assertNoErr(t, err)

		// Verify that headscale reports the nodes as online
		for _, node := range nodes {
			// All nodes should be online
			assert.Truef(
				t,
				node.GetOnline(),
				"expected %s to have online status in Headscale, marked as offline %s after start",
				node.GetName(),
				time.Since(start),
			)
		}

		// Verify that all nodes report all nodes to be online
		for _, client := range allClients {
			status, err := client.Status()
			assertNoErr(t, err)

			for _, peerKey := range status.Peers() {
				peerStatus := status.Peer[peerKey]

				// .Online is only available from CapVer 16, which
				// is not present in 1.18 which is the lowest we
				// test.
				if strings.Contains(client.Hostname(), "1-18") {
					continue
				}

				// All peers of this node should report being
				// connected to the control server
				assert.Truef(
					t,
					peerStatus.Online,
					"expected node %s to be marked as online in %s peer list, marked as offline %s after start",
					peerStatus.HostName,
					client.Hostname(),
					time.Since(start),
				)
			}
		}

		// Check maximum once per second
		time.Sleep(time.Second)
	}
}

// TestPingAllByIPManyUpDown is a variant of the PingAll test which takes the
// tailscale nodes up and down multiple times, ensuring they are able to
// re-establish connectivity.
func TestPingAllByIPManyUpDown(t *testing.T) {
	IntegrationSkip(t)
	t.Parallel()

	scenario, err := NewScenario(dockertestMaxWait())
	assertNoErr(t, err)
	defer scenario.Shutdown()

	// TODO(kradalby): it does not look like the user thing works, only the
	// second one gets created? Maybe only when there are many?
	spec := map[string]int{
		"user1": len(MustTestVersions),
		"user2": len(MustTestVersions),
	}

	err = scenario.CreateHeadscaleEnv(spec,
		[]tsic.Option{},
		hsic.WithTestName("pingallbyipmany"),
		hsic.WithEmbeddedDERPServerOnly(),
		hsic.WithTLS(),
		hsic.WithHostnameAsServerURL(),
	)
	assertNoErrHeadscaleEnv(t, err)

	allClients, err := scenario.ListTailscaleClients()
	assertNoErrListClients(t, err)

	allIps, err := scenario.ListTailscaleClientsIPs()
	assertNoErrListClientIPs(t, err)

	err = scenario.WaitForTailscaleSync()
	assertNoErrSync(t, err)

	// assertClientsState(t, allClients)

	allAddrs := lo.Map(allIps, func(x netip.Addr, index int) string {
		return x.String()
	})

	success := pingAllHelper(t, allClients, allAddrs)
	t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

	wg, _ := errgroup.WithContext(context.Background())

	for run := range 3 {
		t.Logf("Starting DownUpPing run %d", run+1)

		for _, client := range allClients {
			c := client
			wg.Go(func() error {
				t.Logf("taking down %q", c.Hostname())

				return c.Down()
			})
		}

		if err := wg.Wait(); err != nil {
			t.Fatalf("failed to take down all nodes: %s", err)
		}

		time.Sleep(5 * time.Second)

		for _, client := range allClients {
			c := client
			wg.Go(func() error {
				t.Logf("bringing up %q", c.Hostname())

				return c.Up()
			})
		}

		if err := wg.Wait(); err != nil {
			t.Fatalf("failed to bring up all nodes: %s", err)
		}

		time.Sleep(5 * time.Second)

		err = scenario.WaitForTailscaleSync()
		assertNoErrSync(t, err)

		success := pingAllHelper(t, allClients, allAddrs)
		t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
	}
}