Mirror of https://github.com/juanfont/headscale.git, synced 2025-08-11 12:37:42 +00:00
integration: replace time.Sleep with assert.EventuallyWithT (#2680)
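The diff below repeatedly applies one pattern: a fixed time.Sleep followed by one-shot assertions becomes a polling assert.EventuallyWithT block that retries until the condition holds or a timeout expires. As a rough orientation before reading the hunks, here is a minimal sketch of that change, assuming testify's assert package; the fetchBackendState helper and the concrete durations are illustrative stand-ins, not headscale code.

package integration

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

// fetchBackendState is a hypothetical stand-in for a call such as
// client.Status(); it exists only to keep this sketch self-contained.
func fetchBackendState() (string, error) {
	return "NeedsLogin", nil
}

func TestSleepVersusEventuallyWithT(t *testing.T) {
	// Old pattern: sleep a fixed amount of time, then assert once and hope
	// the system has settled.
	//
	//	time.Sleep(5 * time.Second)
	//	state, err := fetchBackendState()
	//	assert.NoError(t, err)
	//	assert.Equal(t, "NeedsLogin", state)

	// New pattern: poll the condition until it passes or the timeout expires.
	// EventuallyWithT re-runs the closure every tick and only fails the test
	// if the assertions still do not hold when waitFor elapses.
	assert.EventuallyWithT(t, func(ct *assert.CollectT) {
		state, err := fetchBackendState()
		assert.NoError(ct, err)
		assert.Equal(ct, "NeedsLogin", state)
	}, 5*time.Second, 1*time.Second)
}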
@@ -1077,7 +1076,6 @@ func TestACLDevice1CanAccessDevice2(t *testing.T) {

func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: 1,
@@ -1213,7 +1212,6 @@ func TestPolicyUpdateWhileRunningWithCLIInDatabase(t *testing.T) {

func TestACLAutogroupMember(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := aclScenario(t,
&policyv2.Policy{
@@ -1271,7 +1269,6 @@ func TestACLAutogroupMember(t *testing.T) {

func TestACLAutogroupTagged(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := aclScenario(t,
&policyv2.Policy{

@@ -3,12 +3,11 @@ package integration
import (
"fmt"
"net/netip"
"slices"
"strconv"
"testing"
"time"

"slices"

v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"github.com/juanfont/headscale/integration/hsic"
"github.com/juanfont/headscale/integration/tsic"
@@ -19,7 +18,6 @@ import (

func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

for _, https := range []bool{true, false} {
t.Run(fmt.Sprintf("with-https-%t", https), func(t *testing.T) {
@@ -66,7 +64,7 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
assertNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Equal(t, len(listNodes), len(allClients))
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

@@ -161,12 +159,11 @@ func TestAuthKeyLogoutAndReloginSameUser(t *testing.T) {
}
})
}

}

func assertLastSeenSet(t *testing.T, node *v1.Node) {
assert.NotNil(t, node)
assert.NotNil(t, node.LastSeen)
assert.NotNil(t, node.GetLastSeen())
}

// This test will first log in two sets of nodes to two sets of users, then
@@ -175,7 +172,6 @@ func assertLastSeenSet(t *testing.T, node *v1.Node) {
// still has nodes, but they are not connected.
func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
@@ -204,7 +200,7 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {
assertNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Equal(t, len(listNodes), len(allClients))
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

@@ -259,7 +255,6 @@ func TestAuthKeyLogoutAndReloginNewUser(t *testing.T) {

func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

for _, https := range []bool{true, false} {
t.Run(fmt.Sprintf("with-https-%t", https), func(t *testing.T) {
@@ -303,7 +298,7 @@ func TestAuthKeyLogoutAndReloginSameUserExpiredKey(t *testing.T) {
assertNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Equal(t, len(listNodes), len(allClients))
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

@@ -1,14 +1,12 @@
package integration

import (
"fmt"
"maps"
"net/netip"
"sort"
"testing"
"time"

"maps"

"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
@@ -21,7 +19,6 @@ import (

func TestOIDCAuthenticationPingAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

// Logins to MockOIDC is served by a queue with a strict order,
// if we use more than one node per user, the order of the logins
@@ -119,7 +116,6 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
// This test is really flaky.
func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

shortAccessTTL := 5 * time.Minute

@@ -174,9 +170,13 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
// of safety reasons) before checking if the clients have logged out.
// The Wait function can't do it itself as it has an upper bound of 1
// min.
time.Sleep(shortAccessTTL + 10*time.Second)

assertTailscaleNodesLogout(t, allClients)
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
for _, client := range allClients {
status, err := client.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}
}, shortAccessTTL+10*time.Second, 5*time.Second)
}

func TestOIDC024UserCreation(t *testing.T) {
@@ -295,9 +295,7 @@ func TestOIDC024UserCreation(t *testing.T) {
spec := ScenarioSpec{
NodesPerUser: 1,
}
for _, user := range tt.cliUsers {
spec.Users = append(spec.Users, user)
}
spec.Users = append(spec.Users, tt.cliUsers...)

for _, user := range tt.oidcUsers {
spec.OIDCUsers = append(spec.OIDCUsers, oidcMockUser(user, tt.emailVerified))
@@ -350,7 +348,6 @@ func TestOIDC024UserCreation(t *testing.T) {

func TestOIDCAuthenticationWithPKCE(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

// Single user with one node for testing PKCE flow
spec := ScenarioSpec{
@@ -402,7 +399,6 @@ func TestOIDCAuthenticationWithPKCE(t *testing.T) {

func TestOIDCReloginSameNodeNewUser(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

// Create no nodes and no users
scenario, err := NewScenario(ScenarioSpec{
@@ -440,7 +436,7 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {

listUsers, err := headscale.ListUsers()
assertNoErr(t, err)
assert.Len(t, listUsers, 0)
assert.Empty(t, listUsers)

ts, err := scenario.CreateTailscaleNode("unstable", tsic.WithNetwork(scenario.networks[scenario.testDefaultNetwork]))
assertNoErr(t, err)
@@ -482,7 +478,13 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
err = ts.Logout()
assertNoErr(t, err)

time.Sleep(5 * time.Second)
// Wait for logout to complete and then do second logout
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 5*time.Second, 1*time.Second)

// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
@@ -530,16 +532,22 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {

// Machine key is the same as the "machine" has not changed,
// but Node key is not as it is a new node
assert.Equal(t, listNodes[0].MachineKey, listNodesAfterNewUserLogin[0].MachineKey)
assert.Equal(t, listNodesAfterNewUserLogin[0].MachineKey, listNodesAfterNewUserLogin[1].MachineKey)
assert.NotEqual(t, listNodesAfterNewUserLogin[0].NodeKey, listNodesAfterNewUserLogin[1].NodeKey)
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(t, listNodesAfterNewUserLogin[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(t, listNodesAfterNewUserLogin[0].GetNodeKey(), listNodesAfterNewUserLogin[1].GetNodeKey())

// Log out user2, and log into user1, no new node should be created,
// the node should now "become" node1 again
err = ts.Logout()
assertNoErr(t, err)

time.Sleep(5 * time.Second)
// Wait for logout to complete and then do second logout
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
// Check that the first logout completed
status, err := ts.Status()
assert.NoError(ct, err)
assert.Equal(ct, "NeedsLogin", status.BackendState)
}, 5*time.Second, 1*time.Second)

// TODO(kradalby): Not sure why we need to logout twice, but it fails and
// logs in immediately after the first logout and I cannot reproduce it
@@ -588,24 +596,24 @@ func TestOIDCReloginSameNodeNewUser(t *testing.T) {
// Validate that the machine we had when we logged in the first time, has the same
// machine key, but a different ID than the newly logged in version of the same
// machine.
assert.Equal(t, listNodes[0].MachineKey, listNodesAfterNewUserLogin[0].MachineKey)
assert.Equal(t, listNodes[0].NodeKey, listNodesAfterNewUserLogin[0].NodeKey)
assert.Equal(t, listNodes[0].Id, listNodesAfterNewUserLogin[0].Id)
assert.Equal(t, listNodes[0].MachineKey, listNodesAfterNewUserLogin[1].MachineKey)
assert.NotEqual(t, listNodes[0].Id, listNodesAfterNewUserLogin[1].Id)
assert.NotEqual(t, listNodes[0].User.Id, listNodesAfterNewUserLogin[1].User.Id)
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[0].GetMachineKey())
assert.Equal(t, listNodes[0].GetNodeKey(), listNodesAfterNewUserLogin[0].GetNodeKey())
assert.Equal(t, listNodes[0].GetId(), listNodesAfterNewUserLogin[0].GetId())
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterNewUserLogin[1].GetMachineKey())
assert.NotEqual(t, listNodes[0].GetId(), listNodesAfterNewUserLogin[1].GetId())
assert.NotEqual(t, listNodes[0].GetUser().GetId(), listNodesAfterNewUserLogin[1].GetUser().GetId())

// Even tho we are logging in again with the same user, the previous key has been expired
// and a new one has been generated. The node entry in the database should be the same
// as the user + machinekey still matches.
assert.Equal(t, listNodes[0].MachineKey, listNodesAfterLoggingBackIn[0].MachineKey)
assert.NotEqual(t, listNodes[0].NodeKey, listNodesAfterLoggingBackIn[0].NodeKey)
assert.Equal(t, listNodes[0].Id, listNodesAfterLoggingBackIn[0].Id)
assert.Equal(t, listNodes[0].GetMachineKey(), listNodesAfterLoggingBackIn[0].GetMachineKey())
assert.NotEqual(t, listNodes[0].GetNodeKey(), listNodesAfterLoggingBackIn[0].GetNodeKey())
assert.Equal(t, listNodes[0].GetId(), listNodesAfterLoggingBackIn[0].GetId())

// The "logged back in" machine should have the same machinekey but a different nodekey
// than the version logged in with a different user.
assert.Equal(t, listNodesAfterLoggingBackIn[0].MachineKey, listNodesAfterLoggingBackIn[1].MachineKey)
assert.NotEqual(t, listNodesAfterLoggingBackIn[0].NodeKey, listNodesAfterLoggingBackIn[1].NodeKey)
assert.Equal(t, listNodesAfterLoggingBackIn[0].GetMachineKey(), listNodesAfterLoggingBackIn[1].GetMachineKey())
assert.NotEqual(t, listNodesAfterLoggingBackIn[0].GetNodeKey(), listNodesAfterLoggingBackIn[1].GetNodeKey())
}

func assertTailscaleNodesLogout(t *testing.T, clients []TailscaleClient) {
@@ -623,7 +631,7 @@ func oidcMockUser(username string, emailVerified bool) mockoidc.MockUser {
return mockoidc.MockUser{
Subject: username,
PreferredUsername: username,
Email: fmt.Sprintf("%s@headscale.net", username),
Email: username + "@headscale.net",
EmailVerified: emailVerified,
}
}

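Beyond the sleep removals, the OIDC hunks above also switch direct field access on the generated API types (for example listNodes[0].MachineKey) to the generated getter methods (GetMachineKey()). A minimal sketch of the difference, assuming the usual protoc-gen-go convention that getters are nil-safe; the nil node below is purely illustrative:

package integration

import (
	"fmt"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)

func demoNilSafeGetters() {
	var node *v1.Node // nil, e.g. from a lookup that found nothing

	// Direct field access would dereference the nil pointer and panic:
	// _ = node.MachineKey

	// The generated getter checks for nil and returns the zero value instead.
	fmt.Println(node.GetMachineKey() == "") // prints true, no panic
}
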
@@ -2,9 +2,8 @@ package integration

import (
"net/netip"
"testing"

"slices"
"testing"

"github.com/juanfont/headscale/integration/hsic"
"github.com/samber/lo"
@@ -55,7 +54,6 @@ func TestAuthWebFlowAuthenticationPingAll(t *testing.T) {

func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: len(MustTestVersions),
@@ -95,7 +93,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
assertNoErrGetHeadscale(t, err)

listNodes, err := headscale.ListNodes()
assert.Equal(t, len(listNodes), len(allClients))
assert.Len(t, allClients, len(listNodes))
nodeCountBeforeLogout := len(listNodes)
t.Logf("node count before logout: %d", nodeCountBeforeLogout)

@@ -140,7 +138,7 @@ func TestAuthWebFlowLogoutAndRelogin(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))

listNodes, err = headscale.ListNodes()
require.Equal(t, nodeCountBeforeLogout, len(listNodes))
require.Len(t, listNodes, nodeCountBeforeLogout)
t.Logf("node count first login: %d, after relogin: %d", nodeCountBeforeLogout, len(listNodes))

for _, client := range allClients {

@@ -18,8 +18,8 @@ import (
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"tailscale.com/tailcfg"
|
||||
"golang.org/x/exp/slices"
|
||||
"tailscale.com/tailcfg"
|
||||
)
|
||||
|
||||
func executeAndUnmarshal[T any](headscale ControlServer, command []string, result T) error {
|
||||
@@ -30,7 +30,7 @@ func executeAndUnmarshal[T any](headscale ControlServer, command []string, resul
|
||||
|
||||
err = json.Unmarshal([]byte(str), result)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal: %s\n command err: %s", err, str)
|
||||
return fmt.Errorf("failed to unmarshal: %w\n command err: %s", err, str)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -48,7 +48,6 @@ func sortWithID[T GRPCSortable](a, b T) int {
|
||||
|
||||
func TestUserCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"user1", "user2"},
|
||||
@@ -184,7 +183,7 @@ func TestUserCommand(t *testing.T) {
|
||||
"--identifier=1",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, deleteResult, "User destroyed")
|
||||
|
||||
var listAfterIDDelete []*v1.User
|
||||
@@ -222,7 +221,7 @@ func TestUserCommand(t *testing.T) {
|
||||
"--name=newname",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
assert.Contains(t, deleteResult, "User destroyed")
|
||||
|
||||
var listAfterNameDelete []v1.User
|
||||
@@ -238,12 +237,11 @@ func TestUserCommand(t *testing.T) {
|
||||
)
|
||||
assertNoErr(t, err)
|
||||
|
||||
require.Len(t, listAfterNameDelete, 0)
|
||||
require.Empty(t, listAfterNameDelete)
|
||||
}
|
||||
|
||||
func TestPreAuthKeyCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
user := "preauthkeyspace"
|
||||
count := 3
|
||||
@@ -347,7 +345,7 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
|
||||
assert.Equal(t, listedPreAuthKeys[index].GetAclTags(), []string{"tag:test1", "tag:test2"})
|
||||
assert.Equal(t, []string{"tag:test1", "tag:test2"}, listedPreAuthKeys[index].GetAclTags())
|
||||
}
|
||||
|
||||
// Test key expiry
|
||||
@@ -386,7 +384,6 @@ func TestPreAuthKeyCommand(t *testing.T) {
|
||||
|
||||
func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
user := "pre-auth-key-without-exp-user"
|
||||
spec := ScenarioSpec{
|
||||
@@ -448,7 +445,6 @@ func TestPreAuthKeyCommandWithoutExpiry(t *testing.T) {
|
||||
|
||||
func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
user := "pre-auth-key-reus-ephm-user"
|
||||
spec := ScenarioSpec{
|
||||
@@ -524,7 +520,6 @@ func TestPreAuthKeyCommandReusableEphemeral(t *testing.T) {
|
||||
|
||||
func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
user1 := "user1"
|
||||
user2 := "user2"
|
||||
@@ -575,7 +570,7 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
|
||||
assertNoErr(t, err)
|
||||
|
||||
listNodes, err := headscale.ListNodes()
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, listNodes, 1)
|
||||
assert.Equal(t, user1, listNodes[0].GetUser().GetName())
|
||||
|
||||
@@ -613,7 +608,7 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
|
||||
}
|
||||
|
||||
listNodes, err = headscale.ListNodes()
|
||||
require.Nil(t, err)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, listNodes, 2)
|
||||
assert.Equal(t, user1, listNodes[0].GetUser().GetName())
|
||||
assert.Equal(t, user2, listNodes[1].GetUser().GetName())
|
||||
@@ -621,7 +616,6 @@ func TestPreAuthKeyCorrectUserLoggedInCommand(t *testing.T) {
|
||||
|
||||
func TestApiKeyCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
count := 5
|
||||
|
||||
@@ -653,7 +647,7 @@ func TestApiKeyCommand(t *testing.T) {
|
||||
"json",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, apiResult)
|
||||
|
||||
keys[idx] = apiResult
|
||||
@@ -672,7 +666,7 @@ func TestApiKeyCommand(t *testing.T) {
|
||||
},
|
||||
&listedAPIKeys,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listedAPIKeys, 5)
|
||||
|
||||
@@ -728,7 +722,7 @@ func TestApiKeyCommand(t *testing.T) {
|
||||
listedAPIKeys[idx].GetPrefix(),
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
expiredPrefixes[listedAPIKeys[idx].GetPrefix()] = true
|
||||
}
|
||||
@@ -744,7 +738,7 @@ func TestApiKeyCommand(t *testing.T) {
|
||||
},
|
||||
&listedAfterExpireAPIKeys,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index := range listedAfterExpireAPIKeys {
|
||||
if _, ok := expiredPrefixes[listedAfterExpireAPIKeys[index].GetPrefix()]; ok {
|
||||
@@ -770,7 +764,7 @@ func TestApiKeyCommand(t *testing.T) {
|
||||
"--prefix",
|
||||
listedAPIKeys[0].GetPrefix(),
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var listedAPIKeysAfterDelete []v1.ApiKey
|
||||
err = executeAndUnmarshal(headscale,
|
||||
@@ -783,14 +777,13 @@ func TestApiKeyCommand(t *testing.T) {
|
||||
},
|
||||
&listedAPIKeysAfterDelete,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listedAPIKeysAfterDelete, 4)
|
||||
}
|
||||
|
||||
func TestNodeTagCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"user1"},
|
||||
@@ -811,7 +804,7 @@ func TestNodeTagCommand(t *testing.T) {
|
||||
types.MustRegistrationID().String(),
|
||||
}
|
||||
nodes := make([]*v1.Node, len(regIDs))
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index, regID := range regIDs {
|
||||
_, err := headscale.Execute(
|
||||
@@ -829,7 +822,7 @@ func TestNodeTagCommand(t *testing.T) {
|
||||
"json",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
@@ -847,7 +840,7 @@ func TestNodeTagCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
nodes[index] = &node
|
||||
}
|
||||
@@ -866,7 +859,7 @@ func TestNodeTagCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, []string{"tag:test"}, node.GetForcedTags())
|
||||
|
||||
@@ -894,7 +887,7 @@ func TestNodeTagCommand(t *testing.T) {
|
||||
},
|
||||
&resultMachines,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
found := false
|
||||
for _, node := range resultMachines {
|
||||
if node.GetForcedTags() != nil {
|
||||
@@ -905,19 +898,15 @@ func TestNodeTagCommand(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.Equal(
|
||||
assert.True(
|
||||
t,
|
||||
true,
|
||||
found,
|
||||
"should find a node with the tag 'tag:test' in the list of nodes",
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
|
||||
func TestNodeAdvertiseTagCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -1024,7 +1013,7 @@ func TestNodeAdvertiseTagCommand(t *testing.T) {
|
||||
},
|
||||
&resultMachines,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
found := false
|
||||
for _, node := range resultMachines {
|
||||
if tags := node.GetValidTags(); tags != nil {
|
||||
@@ -1043,7 +1032,6 @@ func TestNodeAdvertiseTagCommand(t *testing.T) {
|
||||
|
||||
func TestNodeCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"node-user", "other-user"},
|
||||
@@ -1067,7 +1055,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
types.MustRegistrationID().String(),
|
||||
}
|
||||
nodes := make([]*v1.Node, len(regIDs))
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index, regID := range regIDs {
|
||||
_, err := headscale.Execute(
|
||||
@@ -1085,7 +1073,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
"json",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
@@ -1103,7 +1091,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
nodes[index] = &node
|
||||
}
|
||||
@@ -1123,7 +1111,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
},
|
||||
&listAll,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listAll, 5)
|
||||
|
||||
@@ -1144,7 +1132,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
types.MustRegistrationID().String(),
|
||||
}
|
||||
otherUserMachines := make([]*v1.Node, len(otherUserRegIDs))
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index, regID := range otherUserRegIDs {
|
||||
_, err := headscale.Execute(
|
||||
@@ -1162,7 +1150,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
"json",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
@@ -1180,7 +1168,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
otherUserMachines[index] = &node
|
||||
}
|
||||
@@ -1200,7 +1188,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
},
|
||||
&listAllWithotherUser,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// All nodes, nodes + otherUser
|
||||
assert.Len(t, listAllWithotherUser, 7)
|
||||
@@ -1226,7 +1214,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
},
|
||||
&listOnlyotherUserMachineUser,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listOnlyotherUserMachineUser, 2)
|
||||
|
||||
@@ -1258,7 +1246,7 @@ func TestNodeCommand(t *testing.T) {
|
||||
"--force",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Test: list main user after node is deleted
|
||||
var listOnlyMachineUserAfterDelete []v1.Node
|
||||
@@ -1275,14 +1263,13 @@ func TestNodeCommand(t *testing.T) {
|
||||
},
|
||||
&listOnlyMachineUserAfterDelete,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listOnlyMachineUserAfterDelete, 4)
|
||||
}
|
||||
|
||||
func TestNodeExpireCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"node-expire-user"},
|
||||
@@ -1323,7 +1310,7 @@ func TestNodeExpireCommand(t *testing.T) {
|
||||
"json",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
@@ -1341,7 +1328,7 @@ func TestNodeExpireCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
nodes[index] = &node
|
||||
}
|
||||
@@ -1360,7 +1347,7 @@ func TestNodeExpireCommand(t *testing.T) {
|
||||
},
|
||||
&listAll,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listAll, 5)
|
||||
|
||||
@@ -1377,10 +1364,10 @@ func TestNodeExpireCommand(t *testing.T) {
|
||||
"nodes",
|
||||
"expire",
|
||||
"--identifier",
|
||||
fmt.Sprintf("%d", listAll[idx].GetId()),
|
||||
strconv.FormatUint(listAll[idx].GetId(), 10),
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
var listAllAfterExpiry []v1.Node
|
||||
@@ -1395,7 +1382,7 @@ func TestNodeExpireCommand(t *testing.T) {
|
||||
},
|
||||
&listAllAfterExpiry,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listAllAfterExpiry, 5)
|
||||
|
||||
@@ -1408,7 +1395,6 @@ func TestNodeExpireCommand(t *testing.T) {
|
||||
|
||||
func TestNodeRenameCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"node-rename-command"},
|
||||
@@ -1432,7 +1418,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
types.MustRegistrationID().String(),
|
||||
}
|
||||
nodes := make([]*v1.Node, len(regIDs))
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for index, regID := range regIDs {
|
||||
_, err := headscale.Execute(
|
||||
@@ -1487,7 +1473,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
},
|
||||
&listAll,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listAll, 5)
|
||||
|
||||
@@ -1504,11 +1490,11 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
"nodes",
|
||||
"rename",
|
||||
"--identifier",
|
||||
fmt.Sprintf("%d", listAll[idx].GetId()),
|
||||
strconv.FormatUint(listAll[idx].GetId(), 10),
|
||||
fmt.Sprintf("newnode-%d", idx+1),
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Contains(t, res, "Node renamed")
|
||||
}
|
||||
@@ -1525,7 +1511,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
},
|
||||
&listAllAfterRename,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listAllAfterRename, 5)
|
||||
|
||||
@@ -1542,7 +1528,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
"nodes",
|
||||
"rename",
|
||||
"--identifier",
|
||||
fmt.Sprintf("%d", listAll[4].GetId()),
|
||||
strconv.FormatUint(listAll[4].GetId(), 10),
|
||||
strings.Repeat("t", 64),
|
||||
},
|
||||
)
|
||||
@@ -1560,7 +1546,7 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
},
|
||||
&listAllAfterRenameAttempt,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, listAllAfterRenameAttempt, 5)
|
||||
|
||||
@@ -1573,7 +1559,6 @@ func TestNodeRenameCommand(t *testing.T) {
|
||||
|
||||
func TestNodeMoveCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"old-user", "new-user"},
|
||||
@@ -1610,7 +1595,7 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
"json",
|
||||
},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var node v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
@@ -1628,13 +1613,13 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, uint64(1), node.GetId())
|
||||
assert.Equal(t, "nomad-node", node.GetName())
|
||||
assert.Equal(t, node.GetUser().GetName(), "old-user")
|
||||
assert.Equal(t, "old-user", node.GetUser().GetName())
|
||||
|
||||
nodeID := fmt.Sprintf("%d", node.GetId())
|
||||
nodeID := strconv.FormatUint(node.GetId(), 10)
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
@@ -1651,9 +1636,9 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, node.GetUser().GetName(), "new-user")
|
||||
assert.Equal(t, "new-user", node.GetUser().GetName())
|
||||
|
||||
var allNodes []v1.Node
|
||||
err = executeAndUnmarshal(
|
||||
@@ -1667,13 +1652,13 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
},
|
||||
&allNodes,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Len(t, allNodes, 1)
|
||||
|
||||
assert.Equal(t, allNodes[0].GetId(), node.GetId())
|
||||
assert.Equal(t, allNodes[0].GetUser(), node.GetUser())
|
||||
assert.Equal(t, allNodes[0].GetUser().GetName(), "new-user")
|
||||
assert.Equal(t, "new-user", allNodes[0].GetUser().GetName())
|
||||
|
||||
_, err = headscale.Execute(
|
||||
[]string{
|
||||
@@ -1693,7 +1678,7 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
err,
|
||||
"user not found",
|
||||
)
|
||||
assert.Equal(t, node.GetUser().GetName(), "new-user")
|
||||
assert.Equal(t, "new-user", node.GetUser().GetName())
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
@@ -1710,9 +1695,9 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, node.GetUser().GetName(), "old-user")
|
||||
assert.Equal(t, "old-user", node.GetUser().GetName())
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
@@ -1729,14 +1714,13 @@ func TestNodeMoveCommand(t *testing.T) {
|
||||
},
|
||||
&node,
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, node.GetUser().GetName(), "old-user")
|
||||
assert.Equal(t, "old-user", node.GetUser().GetName())
|
||||
}
|
||||
|
||||
func TestPolicyCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
Users: []string{"user1"},
|
||||
@@ -1817,7 +1801,6 @@ func TestPolicyCommand(t *testing.T) {
|
||||
|
||||
func TestPolicyBrokenConfigCommand(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: 1,
|
||||
|
@@ -1,7 +1,6 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
@@ -104,7 +103,7 @@ func DERPVerify(
|
||||
defer c.Close()
|
||||
|
||||
var result error
|
||||
if err := c.Connect(context.Background()); err != nil {
|
||||
if err := c.Connect(t.Context()); err != nil {
|
||||
result = fmt.Errorf("client Connect: %w", err)
|
||||
}
|
||||
if m, err := c.Recv(); err != nil {
|
||||
|
@@ -15,7 +15,6 @@ import (
|
||||
|
||||
func TestResolveMagicDNS(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -49,7 +48,7 @@ func TestResolveMagicDNS(t *testing.T) {
|
||||
// It is safe to ignore this error as we handled it when caching it
|
||||
peerFQDN, _ := peer.FQDN()
|
||||
|
||||
assert.Equal(t, fmt.Sprintf("%s.headscale.net.", peer.Hostname()), peerFQDN)
|
||||
assert.Equal(t, peer.Hostname()+".headscale.net.", peerFQDN)
|
||||
|
||||
command := []string{
|
||||
"tailscale",
|
||||
@@ -85,7 +84,6 @@ func TestResolveMagicDNS(t *testing.T) {
|
||||
|
||||
func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: 1,
|
||||
@@ -222,12 +220,14 @@ func TestResolveMagicDNSExtraRecordsPath(t *testing.T) {
|
||||
_, err = hs.Execute([]string{"rm", erPath})
|
||||
assertNoErr(t, err)
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// The same paths should still be available as it is not cleared on delete.
|
||||
for _, client := range allClients {
|
||||
assertCommandOutputContains(t, client, []string{"dig", "docker.myvpn.example.com"}, "9.9.9.9")
|
||||
}
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
for _, client := range allClients {
|
||||
result, _, err := client.Execute([]string{"dig", "docker.myvpn.example.com"})
|
||||
assert.NoError(ct, err)
|
||||
assert.Contains(ct, result, "9.9.9.9")
|
||||
}
|
||||
}, 10*time.Second, 1*time.Second)
|
||||
|
||||
// Write a new file, the backoff mechanism should make the filewatcher pick it up
|
||||
// again.
|
||||
|
@@ -33,26 +33,27 @@ func DockerAddIntegrationLabels(opts *dockertest.RunOptions, testType string) {
|
||||
}
|
||||
|
||||
// GenerateRunID creates a unique run identifier with timestamp and random hash.
|
||||
// Format: YYYYMMDD-HHMMSS-HASH (e.g., 20250619-143052-a1b2c3)
|
||||
// Format: YYYYMMDD-HHMMSS-HASH (e.g., 20250619-143052-a1b2c3).
|
||||
func GenerateRunID() string {
|
||||
now := time.Now()
|
||||
timestamp := now.Format("20060102-150405")
|
||||
|
||||
|
||||
// Add a short random hash to ensure uniqueness
|
||||
randomHash := util.MustGenerateRandomStringDNSSafe(6)
|
||||
|
||||
return fmt.Sprintf("%s-%s", timestamp, randomHash)
|
||||
}
|
||||
|
||||
// ExtractRunIDFromContainerName extracts the run ID from container name.
|
||||
// Expects format: "prefix-YYYYMMDD-HHMMSS-HASH"
|
||||
// Expects format: "prefix-YYYYMMDD-HHMMSS-HASH".
|
||||
func ExtractRunIDFromContainerName(containerName string) string {
|
||||
parts := strings.Split(containerName, "-")
|
||||
if len(parts) >= 3 {
|
||||
// Return the last three parts as the run ID (YYYYMMDD-HHMMSS-HASH)
|
||||
return strings.Join(parts[len(parts)-3:], "-")
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("unexpected container name format: %s", containerName))
|
||||
|
||||
panic("unexpected container name format: " + containerName)
|
||||
}
|
||||
|
||||
// IsRunningInContainer checks if the current process is running inside a Docker container.
|
||||
@@ -62,4 +63,4 @@ func IsRunningInContainer() bool {
|
||||
// This could be improved with more robust detection if needed
|
||||
_, err := os.Stat("/.dockerenv")
|
||||
return err == nil
|
||||
}
|
||||
}
|
||||
|
@@ -30,7 +30,7 @@ func ExecuteCommandTimeout(timeout time.Duration) ExecuteCommandOption {
|
||||
})
|
||||
}
|
||||
|
||||
// buffer is a goroutine safe bytes.buffer
|
||||
// buffer is a goroutine safe bytes.buffer.
|
||||
type buffer struct {
|
||||
store bytes.Buffer
|
||||
mutex sync.Mutex
|
||||
@@ -58,8 +58,8 @@ func ExecuteCommand(
|
||||
env []string,
|
||||
options ...ExecuteCommandOption,
|
||||
) (string, string, error) {
|
||||
var stdout = buffer{}
|
||||
var stderr = buffer{}
|
||||
stdout := buffer{}
|
||||
stderr := buffer{}
|
||||
|
||||
execConfig := ExecuteCommandConfig{
|
||||
timeout: dockerExecuteTimeout,
|
||||
|
@@ -159,7 +159,6 @@ func New(
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
if dsic.workdir != "" {
|
||||
runOptions.WorkingDir = dsic.workdir
|
||||
}
|
||||
@@ -192,7 +191,7 @@ func New(
|
||||
}
|
||||
// Add integration test labels if running under hi tool
|
||||
dockertestutil.DockerAddIntegrationLabels(runOptions, "derp")
|
||||
|
||||
|
||||
container, err = pool.BuildAndRunWithBuildOptions(
|
||||
buildOptions,
|
||||
runOptions,
|
||||
|
@@ -2,13 +2,13 @@ package integration
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/juanfont/headscale/integration/hsic"
|
||||
"github.com/juanfont/headscale/integration/tsic"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
type ClientsSpec struct {
|
||||
@@ -71,9 +71,9 @@ func TestDERPServerWebsocketScenario(t *testing.T) {
|
||||
NodesPerUser: 1,
|
||||
Users: []string{"user1", "user2", "user3"},
|
||||
Networks: map[string][]string{
|
||||
"usernet1": []string{"user1"},
|
||||
"usernet2": []string{"user2"},
|
||||
"usernet3": []string{"user3"},
|
||||
"usernet1": {"user1"},
|
||||
"usernet2": {"user2"},
|
||||
"usernet3": {"user3"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -106,7 +106,6 @@ func derpServerScenario(
|
||||
furtherAssertions ...func(*Scenario),
|
||||
) {
|
||||
IntegrationSkip(t)
|
||||
// t.Parallel()
|
||||
|
||||
scenario, err := NewScenario(spec)
|
||||
assertNoErr(t, err)
|
||||
|
@@ -26,7 +26,6 @@ import (
|
||||
|
||||
func TestPingAllByIP(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -68,7 +67,6 @@ func TestPingAllByIP(t *testing.T) {
|
||||
|
||||
func TestPingAllByIPPublicDERP(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -118,7 +116,6 @@ func TestEphemeralInAlternateTimezone(t *testing.T) {
|
||||
|
||||
func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -191,7 +188,6 @@ func testEphemeralWithOptions(t *testing.T, opts ...hsic.Option) {
|
||||
// deleted by accident if they are still online and active.
|
||||
func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -260,18 +256,21 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
|
||||
// Wait a bit and bring up the clients again before the expiry
|
||||
// time of the ephemeral nodes.
|
||||
// Nodes should be able to reconnect and work fine.
|
||||
time.Sleep(30 * time.Second)
|
||||
|
||||
for _, client := range allClients {
|
||||
err := client.Up()
|
||||
if err != nil {
|
||||
t.Fatalf("failed to take down client %s: %s", client.Hostname(), err)
|
||||
}
|
||||
}
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
|
||||
success = pingAllHelper(t, allClients, allAddrs)
|
||||
// Wait for clients to sync and be able to ping each other after reconnection
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
success = pingAllHelper(t, allClients, allAddrs)
|
||||
assert.Greater(ct, success, 0, "Ephemeral nodes should be able to reconnect and ping")
|
||||
}, 60*time.Second, 2*time.Second)
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
|
||||
// Take down all clients, this should start an expiry timer for each.
|
||||
@@ -284,7 +283,13 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
|
||||
|
||||
// This time wait for all of the nodes to expire and check that they are no longer
|
||||
// registered.
|
||||
time.Sleep(3 * time.Minute)
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
for _, userName := range spec.Users {
|
||||
nodes, err := headscale.ListNodes(userName)
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodes, 0, "Ephemeral nodes should be expired and removed for user %s", userName)
|
||||
}
|
||||
}, 4*time.Minute, 10*time.Second)
|
||||
|
||||
for _, userName := range spec.Users {
|
||||
nodes, err := headscale.ListNodes(userName)
|
||||
@@ -305,7 +310,6 @@ func TestEphemeral2006DeletedTooQuickly(t *testing.T) {
|
||||
|
||||
func TestPingAllByHostname(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -341,20 +345,6 @@ func TestPingAllByHostname(t *testing.T) {
|
||||
// nolint:tparallel
|
||||
func TestTaildrop(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
retry := func(times int, sleepInterval time.Duration, doWork func() error) error {
|
||||
var err error
|
||||
for range times {
|
||||
err = doWork()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(sleepInterval)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -396,40 +386,27 @@ func TestTaildrop(t *testing.T) {
|
||||
"/var/run/tailscale/tailscaled.sock",
|
||||
"http://local-tailscaled.sock/localapi/v0/file-targets",
|
||||
}
|
||||
err = retry(10, 1*time.Second, func() error {
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
result, _, err := client.Execute(curlCommand)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
assert.NoError(ct, err)
|
||||
|
||||
var fts []apitype.FileTarget
|
||||
err = json.Unmarshal([]byte(result), &fts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
assert.NoError(ct, err)
|
||||
|
||||
if len(fts) != len(allClients)-1 {
|
||||
ftStr := fmt.Sprintf("FileTargets for %s:\n", client.Hostname())
|
||||
for _, ft := range fts {
|
||||
ftStr += fmt.Sprintf("\t%s\n", ft.Node.Name)
|
||||
}
|
||||
return fmt.Errorf(
|
||||
"client %s does not have all its peers as FileTargets, got %d, want: %d\n%s",
|
||||
client.Hostname(),
|
||||
assert.Failf(ct, "client %s does not have all its peers as FileTargets",
|
||||
"got %d, want: %d\n%s",
|
||||
len(fts),
|
||||
len(allClients)-1,
|
||||
ftStr,
|
||||
)
|
||||
}
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed to query localapi for filetarget on %s, err: %s",
|
||||
client.Hostname(),
|
||||
err,
|
||||
)
|
||||
}
|
||||
}, 10*time.Second, 1*time.Second)
|
||||
}
|
||||
|
||||
for _, client := range allClients {
|
||||
@@ -454,24 +431,15 @@ func TestTaildrop(t *testing.T) {
|
||||
fmt.Sprintf("%s:", peerFQDN),
|
||||
}
|
||||
|
||||
err := retry(10, 1*time.Second, func() error {
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
t.Logf(
|
||||
"Sending file from %s to %s\n",
|
||||
client.Hostname(),
|
||||
peer.Hostname(),
|
||||
)
|
||||
_, _, err := client.Execute(command)
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"failed to send taildrop file on %s with command %q, err: %s",
|
||||
client.Hostname(),
|
||||
strings.Join(command, " "),
|
||||
err,
|
||||
)
|
||||
}
|
||||
assert.NoError(ct, err)
|
||||
}, 10*time.Second, 1*time.Second)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -520,7 +488,6 @@ func TestTaildrop(t *testing.T) {
|
||||
|
||||
func TestUpdateHostnameFromClient(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
hostnames := map[string]string{
|
||||
"1": "user1-host",
|
||||
@@ -603,9 +570,47 @@ func TestUpdateHostnameFromClient(t *testing.T) {
|
||||
assertNoErr(t, err)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
// Verify that the server-side rename is reflected in DNSName while HostName remains unchanged
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
// Build a map of expected DNSNames by node ID
|
||||
expectedDNSNames := make(map[string]string)
|
||||
for _, node := range nodes {
|
||||
nodeID := strconv.FormatUint(node.GetId(), 10)
|
||||
expectedDNSNames[nodeID] = fmt.Sprintf("%d-givenname.headscale.net.", node.GetId())
|
||||
}
|
||||
|
||||
// Verify from each client's perspective
|
||||
for _, client := range allClients {
|
||||
status, err := client.Status()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
// Check self node
|
||||
selfID := string(status.Self.ID)
|
||||
expectedDNS := expectedDNSNames[selfID]
|
||||
assert.Equal(ct, expectedDNS, status.Self.DNSName,
|
||||
"Self DNSName should be renamed for client %s (ID: %s)", client.Hostname(), selfID)
|
||||
|
||||
// HostName should remain as the original client-reported hostname
|
||||
originalHostname := hostnames[selfID]
|
||||
assert.Equal(ct, originalHostname, status.Self.HostName,
|
||||
"Self HostName should remain unchanged for client %s (ID: %s)", client.Hostname(), selfID)
|
||||
|
||||
// Check peers
|
||||
for _, peer := range status.Peer {
|
||||
peerID := string(peer.ID)
|
||||
if expectedDNS, ok := expectedDNSNames[peerID]; ok {
|
||||
assert.Equal(ct, expectedDNS, peer.DNSName,
|
||||
"Peer DNSName should be renamed for peer ID %s as seen by client %s", peerID, client.Hostname())
|
||||
|
||||
// HostName should remain as the original client-reported hostname
|
||||
originalHostname := hostnames[peerID]
|
||||
assert.Equal(ct, originalHostname, peer.HostName,
|
||||
"Peer HostName should remain unchanged for peer ID %s as seen by client %s", peerID, client.Hostname())
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 60*time.Second, 2*time.Second)
|
||||
|
||||
// Verify that the clients can see the new hostname, but no givenName
|
||||
for _, client := range allClients {
|
||||
status, err := client.Status()
|
||||
assertNoErr(t, err)
|
||||
@@ -647,7 +652,6 @@ func TestUpdateHostnameFromClient(t *testing.T) {
|
||||
|
||||
func TestExpireNode(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -707,7 +711,23 @@ func TestExpireNode(t *testing.T) {
|
||||
|
||||
t.Logf("Node %s with node_key %s has been expired", node.GetName(), expiredNodeKey.String())
|
||||
|
||||
time.Sleep(2 * time.Minute)
|
||||
// Verify that the expired node has been marked in all peers list.
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
for _, client := range allClients {
|
||||
status, err := client.Status()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
if client.Hostname() != node.GetName() {
|
||||
// Check if the expired node appears as expired in this client's peer list
|
||||
for key, peer := range status.Peer {
|
||||
if key == expiredNodeKey {
|
||||
assert.True(ct, peer.Expired, "Node should be marked as expired for client %s", client.Hostname())
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}, 3*time.Minute, 10*time.Second)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
@@ -774,7 +794,6 @@ func TestExpireNode(t *testing.T) {
|
||||
|
||||
func TestNodeOnlineStatus(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -890,7 +909,6 @@ func TestNodeOnlineStatus(t *testing.T) {
|
||||
// five times ensuring they are able to restablish connectivity.
|
||||
func TestPingAllByIPManyUpDown(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: len(MustTestVersions),
|
||||
@@ -944,8 +962,6 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
|
||||
t.Fatalf("failed to take down all nodes: %s", err)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
for _, client := range allClients {
|
||||
c := client
|
||||
wg.Go(func() error {
|
||||
@@ -958,10 +974,14 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
|
||||
t.Fatalf("failed to take down all nodes: %s", err)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
// Wait for sync and successful pings after nodes come back up
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assert.NoError(ct, err)
|
||||
|
||||
err = scenario.WaitForTailscaleSync()
|
||||
assertNoErrSync(t, err)
|
||||
success := pingAllHelper(t, allClients, allAddrs)
|
||||
assert.Greater(ct, success, 0, "Nodes should be able to ping after coming back up")
|
||||
}, 30*time.Second, 2*time.Second)
|
||||
|
||||
success := pingAllHelper(t, allClients, allAddrs)
|
||||
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
|
||||
@@ -970,7 +990,6 @@ func TestPingAllByIPManyUpDown(t *testing.T) {
|
||||
|
||||
func Test2118DeletingOnlineNodePanics(t *testing.T) {
|
||||
IntegrationSkip(t)
|
||||
t.Parallel()
|
||||
|
||||
spec := ScenarioSpec{
|
||||
NodesPerUser: 1,
|
||||
@@ -1042,10 +1061,24 @@ func Test2118DeletingOnlineNodePanics(t *testing.T) {
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
// Ensure that the node has been deleted, this did not occur due to a panic.
|
||||
var nodeListAfter []v1.Node
|
||||
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"nodes",
|
||||
"list",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
&nodeListAfter,
|
||||
)
|
||||
assert.NoError(ct, err)
|
||||
assert.Len(ct, nodeListAfter, 1, "Node should be deleted from list")
|
||||
}, 10*time.Second, 1*time.Second)
|
||||
|
||||
err = executeAndUnmarshal(
|
||||
headscale,
|
||||
[]string{
|
||||
|
@@ -191,7 +191,7 @@ func WithPostgres() Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithPolicy sets the policy mode for headscale
|
||||
// WithPolicy sets the policy mode for headscale.
|
||||
func WithPolicyMode(mode types.PolicyMode) Option {
|
||||
return func(hsic *HeadscaleInContainer) {
|
||||
hsic.policyMode = mode
|
||||
@@ -279,7 +279,7 @@ func New(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hostname := fmt.Sprintf("hs-%s", hash)
|
||||
hostname := "hs-" + hash
|
||||
|
||||
hsic := &HeadscaleInContainer{
|
||||
hostname: hostname,
|
||||
@@ -308,14 +308,14 @@ func New(
|
||||
|
||||
if hsic.postgres {
|
||||
hsic.env["HEADSCALE_DATABASE_TYPE"] = "postgres"
|
||||
hsic.env["HEADSCALE_DATABASE_POSTGRES_HOST"] = fmt.Sprintf("postgres-%s", hash)
|
||||
hsic.env["HEADSCALE_DATABASE_POSTGRES_HOST"] = "postgres-" + hash
|
||||
hsic.env["HEADSCALE_DATABASE_POSTGRES_USER"] = "headscale"
|
||||
hsic.env["HEADSCALE_DATABASE_POSTGRES_PASS"] = "headscale"
|
||||
hsic.env["HEADSCALE_DATABASE_POSTGRES_NAME"] = "headscale"
|
||||
delete(hsic.env, "HEADSCALE_DATABASE_SQLITE_PATH")
|
||||
|
||||
pgRunOptions := &dockertest.RunOptions{
|
||||
Name: fmt.Sprintf("postgres-%s", hash),
|
||||
Name: "postgres-" + hash,
|
||||
Repository: "postgres",
|
||||
Tag: "latest",
|
||||
Networks: networks,
|
||||
@@ -328,7 +328,7 @@ func New(
|
||||
|
||||
// Add integration test labels if running under hi tool
|
||||
dockertestutil.DockerAddIntegrationLabels(pgRunOptions, "postgres")
|
||||
|
||||
|
||||
pg, err := pool.RunWithOptions(pgRunOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("starting postgres container: %w", err)
|
||||
@@ -373,7 +373,6 @@ func New(
|
||||
Env: env,
|
||||
}
|
||||
|
||||
|
||||
if len(hsic.hostPortBindings) > 0 {
|
||||
runOptions.PortBindings = map[docker.Port][]docker.PortBinding{}
|
||||
for port, hostPorts := range hsic.hostPortBindings {
|
||||
@@ -396,7 +395,7 @@ func New(
|
||||
|
||||
// Add integration test labels if running under hi tool
|
||||
dockertestutil.DockerAddIntegrationLabels(runOptions, "headscale")
|
||||
|
||||
|
||||
container, err := pool.BuildAndRunWithBuildOptions(
|
||||
headscaleBuildOptions,
|
||||
runOptions,
|
||||
@@ -566,7 +565,7 @@ func (t *HeadscaleInContainer) SaveMetrics(savePath string) error {
|
||||
|
||||
// extractTarToDirectory extracts a tar archive to a directory.
|
||||
func extractTarToDirectory(tarData []byte, targetDir string) error {
|
||||
if err := os.MkdirAll(targetDir, 0755); err != nil {
|
||||
if err := os.MkdirAll(targetDir, 0o755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", targetDir, err)
|
||||
}
|
||||
|
||||
@@ -624,6 +623,7 @@ func (t *HeadscaleInContainer) SaveProfile(savePath string) error {
|
||||
}
|
||||
|
||||
targetDir := path.Join(savePath, t.hostname+"-pprof")
|
||||
|
||||
return extractTarToDirectory(tarFile, targetDir)
|
||||
}
|
||||
|
||||
@@ -634,6 +634,7 @@ func (t *HeadscaleInContainer) SaveMapResponses(savePath string) error {
|
||||
}
|
||||
|
||||
targetDir := path.Join(savePath, t.hostname+"-mapresponses")
|
||||
|
||||
return extractTarToDirectory(tarFile, targetDir)
|
||||
}
|
||||
|
||||
@@ -672,17 +673,16 @@ func (t *HeadscaleInContainer) SaveDatabase(savePath string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check database schema (sqlite3 command failed): %w", err)
|
||||
}
|
||||
|
||||
|
||||
if strings.TrimSpace(schemaCheck) == "" {
|
||||
return fmt.Errorf("database file exists but has no schema (empty database)")
|
||||
return errors.New("database file exists but has no schema (empty database)")
|
||||
}
|
||||
|
||||
|
||||
// Show a preview of the schema (first 500 chars)
|
||||
schemaPreview := schemaCheck
|
||||
if len(schemaPreview) > 500 {
|
||||
schemaPreview = schemaPreview[:500] + "..."
|
||||
}
|
||||
log.Printf("Database schema preview:\n%s", schemaPreview)
|
||||
|
||||
tarFile, err := t.FetchPath("/tmp/integration_test_db.sqlite3")
|
||||
if err != nil {
|
||||
@@ -727,7 +727,7 @@ func (t *HeadscaleInContainer) SaveDatabase(savePath string) error {
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("no regular file found in database tar archive")
|
||||
return errors.New("no regular file found in database tar archive")
|
||||
}
|
||||
|
||||
// Execute runs a command inside the Headscale container and returns the
|
||||
@@ -756,13 +756,13 @@ func (t *HeadscaleInContainer) Execute(
|
||||
|
||||
// GetPort returns the docker container port as a string.
|
||||
func (t *HeadscaleInContainer) GetPort() string {
|
||||
return fmt.Sprintf("%d", t.port)
|
||||
return strconv.Itoa(t.port)
|
||||
}
|
||||
|
||||
// GetHealthEndpoint returns a health endpoint for the HeadscaleInContainer
|
||||
// instance.
|
||||
func (t *HeadscaleInContainer) GetHealthEndpoint() string {
|
||||
return fmt.Sprintf("%s/health", t.GetEndpoint())
|
||||
return t.GetEndpoint() + "/health"
|
||||
}
|
||||
|
||||
// GetEndpoint returns the Headscale endpoint for the HeadscaleInContainer.
|
||||
@@ -772,10 +772,10 @@ func (t *HeadscaleInContainer) GetEndpoint() string {
|
||||
t.port)
|
||||
|
||||
if t.hasTLS() {
|
||||
return fmt.Sprintf("https://%s", hostEndpoint)
|
||||
return "https://" + hostEndpoint
|
||||
}
|
||||
|
||||
return fmt.Sprintf("http://%s", hostEndpoint)
|
||||
return "http://" + hostEndpoint
|
||||
}
|
||||
|
||||
// GetCert returns the public certificate of the HeadscaleInContainer.
|
||||
@@ -910,6 +910,7 @@ func (t *HeadscaleInContainer) ListNodes(
|
||||
}
|
||||
|
||||
ret = append(ret, nodes...)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -932,6 +933,7 @@ func (t *HeadscaleInContainer) ListNodes(
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return cmp.Compare(ret[i].GetId(), ret[j].GetId()) == -1
|
||||
})
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
@@ -943,10 +945,10 @@ func (t *HeadscaleInContainer) NodesByUser() (map[string][]*v1.Node, error) {
|
||||
|
||||
var userMap map[string][]*v1.Node
|
||||
for _, node := range nodes {
|
||||
if _, ok := userMap[node.User.Name]; !ok {
|
||||
mak.Set(&userMap, node.User.Name, []*v1.Node{node})
|
||||
if _, ok := userMap[node.GetUser().GetName()]; !ok {
|
||||
mak.Set(&userMap, node.GetUser().GetName(), []*v1.Node{node})
|
||||
} else {
|
||||
userMap[node.User.Name] = append(userMap[node.User.Name], node)
|
||||
userMap[node.GetUser().GetName()] = append(userMap[node.GetUser().GetName()], node)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -999,7 +1001,7 @@ func (t *HeadscaleInContainer) MapUsers() (map[string]*v1.User, error) {
|
||||
|
||||
var userMap map[string]*v1.User
|
||||
for _, user := range users {
|
||||
mak.Set(&userMap, user.Name, user)
|
||||
mak.Set(&userMap, user.GetName(), user)
|
||||
}
|
||||
|
||||
return userMap, nil
|
||||
@@ -1095,7 +1097,7 @@ func (h *HeadscaleInContainer) PID() (int, error) {
|
||||
case 1:
|
||||
return pids[0], nil
|
||||
default:
|
||||
return 0, fmt.Errorf("multiple headscale processes running")
|
||||
return 0, errors.New("multiple headscale processes running")
|
||||
}
|
||||
}

@@ -1121,7 +1123,7 @@ func (t *HeadscaleInContainer) ApproveRoutes(id uint64, routes []netip.Prefix) (
"headscale", "nodes", "approve-routes",
"--output", "json",
"--identifier", strconv.FormatUint(id, 10),
fmt.Sprintf("--routes=%s", strings.Join(util.PrefixesToString(routes), ",")),
"--routes=" + strings.Join(util.PrefixesToString(routes), ","),
}

result, _, err := dockertestutil.ExecuteCommand(

@@ -4,13 +4,12 @@ import (
"encoding/json"
"fmt"
"net/netip"
"slices"
"sort"
"strings"
"testing"
"time"

"slices"

cmpdiff "github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
@@ -37,7 +36,6 @@ var allPorts = filter.PortRange{First: 0, Last: 0xffff}
// routes.
func TestEnablingRoutes(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: 3,
@@ -182,11 +180,12 @@ func TestEnablingRoutes(t *testing.T) {
for _, peerKey := range status.Peers() {
peerStatus := status.Peer[peerKey]

if peerStatus.ID == "1" {
switch peerStatus.ID {
case "1":
requirePeerSubnetRoutes(t, peerStatus, nil)
} else if peerStatus.ID == "2" {
case "2":
requirePeerSubnetRoutes(t, peerStatus, nil)
} else {
default:
requirePeerSubnetRoutes(t, peerStatus, []netip.Prefix{netip.MustParsePrefix("10.0.2.0/24")})
}
}
@@ -195,7 +194,6 @@ func TestEnablingRoutes(t *testing.T) {

func TestHASubnetRouterFailover(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: 3,
@@ -779,7 +777,6 @@ func TestHASubnetRouterFailover(t *testing.T) {
// https://github.com/juanfont/headscale/issues/1604
func TestSubnetRouteACL(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

user := "user4"

@@ -1003,7 +1000,6 @@ func TestSubnetRouteACL(t *testing.T) {
// set during login instead of set.
func TestEnablingExitRoutes(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

user := "user2"

@@ -1097,7 +1093,6 @@ func TestEnablingExitRoutes(t *testing.T) {
// subnet router is working as expected.
func TestSubnetRouterMultiNetwork(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: 1,
@@ -1177,7 +1172,7 @@ func TestSubnetRouterMultiNetwork(t *testing.T) {

// Enable route
_, err = headscale.ApproveRoutes(
nodes[0].Id,
nodes[0].GetId(),
[]netip.Prefix{*pref},
)
require.NoError(t, err)
@@ -1224,7 +1219,6 @@ func TestSubnetRouterMultiNetwork(t *testing.T) {

func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

spec := ScenarioSpec{
NodesPerUser: 1,
@@ -1300,7 +1294,7 @@ func TestSubnetRouterMultiNetworkExitNode(t *testing.T) {
}

// Enable route
_, err = headscale.ApproveRoutes(nodes[0].Id, []netip.Prefix{tsaddr.AllIPv4()})
_, err = headscale.ApproveRoutes(nodes[0].GetId(), []netip.Prefix{tsaddr.AllIPv4()})
require.NoError(t, err)

time.Sleep(5 * time.Second)
@@ -1719,7 +1713,7 @@ func TestAutoApproveMultiNetwork(t *testing.T) {
pak, err := scenario.CreatePreAuthKey(userMap["user1"].GetId(), false, false)
assertNoErr(t, err)

err = routerUsernet1.Login(headscale.GetEndpoint(), pak.Key)
err = routerUsernet1.Login(headscale.GetEndpoint(), pak.GetKey())
assertNoErr(t, err)
}
// extra creation end.
@@ -2065,7 +2059,6 @@ func requireNodeRouteCount(t *testing.T, node *v1.Node, announced, approved, sub
// that are explicitly allowed in the ACL.
func TestSubnetRouteACLFiltering(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

// Use router and node users for better clarity
routerUser := "router"
@@ -2090,7 +2083,7 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
defer scenario.ShutdownAssertNoPanics(t)

// Set up the ACL policy that allows the node to access only one of the subnet routes (10.10.10.0/24)
aclPolicyStr := fmt.Sprintf(`{
aclPolicyStr := `{
"hosts": {
"router": "100.64.0.1/32",
"node": "100.64.0.2/32"
@@ -2115,7 +2108,7 @@ func TestSubnetRouteACLFiltering(t *testing.T) {
]
}
]
}`)
}`

route, err := scenario.SubnetOfNetwork("usernet1")
require.NoError(t, err)

@@ -123,7 +123,7 @@ type ScenarioSpec struct {
// NodesPerUser is how many nodes should be attached to each user.
NodesPerUser int

// Networks, if set, is the seperate Docker networks that should be
// Networks, if set, is the separate Docker networks that should be
// created and a list of the users that should be placed in those networks.
// If not set, a single network will be created and all users+nodes will be
// added there.
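The corrected comment above describes how ScenarioSpec.Networks partitions users across Docker networks. As a rough, hypothetical illustration of the shape that comment implies (a local stand-in struct, field shapes assumed from the tests earlier in this diff rather than taken from the real definition):

package main

import "fmt"

// spec mirrors the shape the ScenarioSpec comment describes: a node count per
// user plus an optional network-to-users placement map.
type spec struct {
	NodesPerUser int
	Users        []string
	Networks     map[string][]string
}

func main() {
	s := spec{
		NodesPerUser: 1,
		Users:        []string{"user1", "user2"},
		// Two separate Docker networks, one user in each; leaving Networks
		// nil would place every user and node on a single shared network.
		Networks: map[string][]string{
			"usernet1": {"user1"},
			"usernet2": {"user2"},
		},
	}
	fmt.Printf("%+v\n", s)
}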
@@ -1077,7 +1077,7 @@ func (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUse

hash, _ := util.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)

hostname := fmt.Sprintf("hs-oidcmock-%s", hash)
hostname := "hs-oidcmock-" + hash

usersJSON, err := json.Marshal(users)
if err != nil {
@@ -1093,16 +1093,15 @@ func (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUse
},
Networks: s.Networks(),
Env: []string{
fmt.Sprintf("MOCKOIDC_ADDR=%s", hostname),
"MOCKOIDC_ADDR=" + hostname,
fmt.Sprintf("MOCKOIDC_PORT=%d", port),
"MOCKOIDC_CLIENT_ID=superclient",
"MOCKOIDC_CLIENT_SECRET=supersecret",
fmt.Sprintf("MOCKOIDC_ACCESS_TTL=%s", accessTTL.String()),
fmt.Sprintf("MOCKOIDC_USERS=%s", string(usersJSON)),
"MOCKOIDC_ACCESS_TTL=" + accessTTL.String(),
"MOCKOIDC_USERS=" + string(usersJSON),
},
}


headscaleBuildOptions := &dockertest.BuildOptions{
Dockerfile: hsic.IntegrationTestDockerFileName,
ContextDir: dockerContextPath,
@@ -1117,7 +1116,7 @@ func (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUse

// Add integration test labels if running under hi tool
dockertestutil.DockerAddIntegrationLabels(mockOidcOptions, "oidc")


if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
headscaleBuildOptions,
mockOidcOptions,
@@ -1184,7 +1183,7 @@ func Webservice(s *Scenario, networkName string) (*dockertest.Resource, error) {

hash := util.MustGenerateRandomStringDNSSafe(hsicOIDCMockHashLength)

hostname := fmt.Sprintf("hs-webservice-%s", hash)
hostname := "hs-webservice-" + hash

network, ok := s.networks[s.prefixedNetworkName(networkName)]
if !ok {

@@ -28,7 +28,6 @@ func IntegrationSkip(t *testing.T) {
// nolint:tparallel
func TestHeadscale(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

var err error

@@ -75,7 +74,6 @@ func TestHeadscale(t *testing.T) {
// nolint:tparallel
func TestTailscaleNodesJoiningHeadcale(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

var err error

@@ -22,35 +22,6 @@ func isSSHNoAccessStdError(stderr string) bool {
strings.Contains(stderr, "tailnet policy does not permit you to SSH to this node")
}

var retry = func(times int, sleepInterval time.Duration,
doWork func() (string, string, error),
) (string, string, error) {
var result string
var stderr string
var err error

for range times {
tempResult, tempStderr, err := doWork()

result += tempResult
stderr += tempStderr

if err == nil {
return result, stderr, nil
}

// If we get a permission denied error, we can fail immediately
// since that is something we won-t recover from by retrying.
if err != nil && isSSHNoAccessStdError(stderr) {
return result, stderr, err
}

time.Sleep(sleepInterval)
}

return result, stderr, err
}

func sshScenario(t *testing.T, policy *policyv2.Policy, clientsPerUser int) *Scenario {
t.Helper()

@@ -92,7 +63,6 @@ func sshScenario(t *testing.T, policy *policyv2.Policy, clientsPerUser int) *Sce

func TestSSHOneUserToAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := sshScenario(t,
&policyv2.Policy{
@@ -160,7 +130,6 @@ func TestSSHOneUserToAll(t *testing.T) {

func TestSSHMultipleUsersAllToAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := sshScenario(t,
&policyv2.Policy{
@@ -216,7 +185,6 @@ func TestSSHMultipleUsersAllToAll(t *testing.T) {

func TestSSHNoSSHConfigured(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := sshScenario(t,
&policyv2.Policy{
@@ -261,7 +229,6 @@ func TestSSHNoSSHConfigured(t *testing.T) {

func TestSSHIsBlockedInACL(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := sshScenario(t,
&policyv2.Policy{
@@ -313,7 +280,6 @@ func TestSSHIsBlockedInACL(t *testing.T) {

func TestSSHUserOnlyIsolation(t *testing.T) {
IntegrationSkip(t)
t.Parallel()

scenario := sshScenario(t,
&policyv2.Policy{
@@ -404,6 +370,14 @@ func TestSSHUserOnlyIsolation(t *testing.T) {
}

func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
return doSSHWithRetry(t, client, peer, true)
}

func doSSHWithoutRetry(t *testing.T, client TailscaleClient, peer TailscaleClient) (string, string, error) {
return doSSHWithRetry(t, client, peer, false)
}

func doSSHWithRetry(t *testing.T, client TailscaleClient, peer TailscaleClient, retry bool) (string, string, error) {
t.Helper()

peerFQDN, _ := peer.FQDN()
@@ -417,9 +391,29 @@ func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string,
log.Printf("Running from %s to %s", client.Hostname(), peer.Hostname())
log.Printf("Command: %s", strings.Join(command, " "))

return retry(10, 1*time.Second, func() (string, string, error) {
return client.Execute(command)
})
var result, stderr string
var err error

if retry {
// Use assert.EventuallyWithT to retry SSH connections for success cases
assert.EventuallyWithT(t, func(ct *assert.CollectT) {
result, stderr, err = client.Execute(command)

// If we get a permission denied error, we can fail immediately
// since that is something we won't recover from by retrying.
if err != nil && isSSHNoAccessStdError(stderr) {
return // Don't retry permission denied errors
}

// For all other errors, assert no error to trigger retry
assert.NoError(ct, err)
}, 10*time.Second, 1*time.Second)
} else {
// For failure cases, just execute once
result, stderr, err = client.Execute(command)
}

return result, stderr, err
}
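The rewritten doSSHWithRetry above carries the main idea of this commit: the hand-rolled retry helper is replaced by testify's assert.EventuallyWithT. EventuallyWithT invokes the closure with a fresh *assert.CollectT on every tick (1 s here) until a run records no failures or the overall deadline (10 s) expires; only the failures from the last attempt are reported on the real *testing.T. A minimal standalone sketch with a made-up condition:

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithTSketch(t *testing.T) {
	var calls atomic.Int32

	// The condition fails on the first two polls and passes on the third.
	assert.EventuallyWithT(t, func(ct *assert.CollectT) {
		n := calls.Add(1)
		// Failures recorded on ct only schedule another tick; they do not
		// fail t unless the deadline below is exhausted.
		assert.GreaterOrEqual(ct, n, int32(3), "not ready yet")
	}, 5*time.Second, 100*time.Millisecond)
}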

func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClient) {
@@ -434,7 +428,7 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien
func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()

result, stderr, err := doSSH(t, client, peer)
result, stderr, err := doSSHWithoutRetry(t, client, peer)

assert.Empty(t, result)

@@ -444,7 +438,7 @@ func assertSSHTimeout(t *testing.T, client TailscaleClient, peer Tailsc
func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()

result, stderr, _ := doSSH(t, client, peer)
result, stderr, _ := doSSHWithoutRetry(t, client, peer)

assert.Empty(t, result)

@@ -251,7 +251,6 @@ func New(
Env: []string{},
}


if tsic.withWebsocketDERP {
if version != VersionHead {
return tsic, errInvalidClientConfig
@@ -463,7 +462,7 @@ func (t *TailscaleInContainer) buildLoginCommand(

if len(t.withTags) > 0 {
command = append(command,
fmt.Sprintf(`--advertise-tags=%s`, strings.Join(t.withTags, ",")),
"--advertise-tags="+strings.Join(t.withTags, ","),
)
}

@@ -685,7 +684,7 @@ func (t *TailscaleInContainer) MustID() types.NodeID {
// Panics if version is lower then minimum.
func (t *TailscaleInContainer) Netmap() (*netmap.NetworkMap, error) {
if !util.TailscaleVersionNewerOrEqual("1.56", t.version) {
panic(fmt.Sprintf("tsic.Netmap() called with unsupported version: %s", t.version))
panic("tsic.Netmap() called with unsupported version: " + t.version)
}

command := []string{
@@ -1026,7 +1025,7 @@ func (t *TailscaleInContainer) Ping(hostnameOrIP string, opts ...PingOption) err
"tailscale", "ping",
fmt.Sprintf("--timeout=%s", args.timeout),
fmt.Sprintf("--c=%d", args.count),
fmt.Sprintf("--until-direct=%s", strconv.FormatBool(args.direct)),
"--until-direct=" + strconv.FormatBool(args.direct),
}

command = append(command, hostnameOrIP)
@@ -1131,11 +1130,11 @@ func (t *TailscaleInContainer) Curl(url string, opts ...CurlOption) (string, err
command := []string{
"curl",
"--silent",
"--connect-timeout", fmt.Sprintf("%d", int(args.connectionTimeout.Seconds())),
"--max-time", fmt.Sprintf("%d", int(args.maxTime.Seconds())),
"--retry", fmt.Sprintf("%d", args.retry),
"--retry-delay", fmt.Sprintf("%d", int(args.retryDelay.Seconds())),
"--retry-max-time", fmt.Sprintf("%d", int(args.retryMaxTime.Seconds())),
"--connect-timeout", strconv.Itoa(int(args.connectionTimeout.Seconds())),
"--max-time", strconv.Itoa(int(args.maxTime.Seconds())),
"--retry", strconv.Itoa(args.retry),
"--retry-delay", strconv.Itoa(int(args.retryDelay.Seconds())),
"--retry-max-time", strconv.Itoa(int(args.retryMaxTime.Seconds())),
url,
}

@@ -1230,7 +1229,7 @@ func (t *TailscaleInContainer) ReadFile(path string) ([]byte, error) {
}

if out.Len() == 0 {
return nil, fmt.Errorf("file is empty")
return nil, errors.New("file is empty")
}

return out.Bytes(), nil
@@ -1259,5 +1258,6 @@ func (t *TailscaleInContainer) GetNodePrivateKey() (*key.NodePrivate, error) {
if err = json.Unmarshal(currentProfile, &p); err != nil {
return nil, fmt.Errorf("failed to unmarshal current profile state: %w", err)
}

return &p.Persist.PrivateNodeKey, nil
}

@@ -3,7 +3,6 @@ package integration
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"net/netip"
@@ -267,7 +266,7 @@ func assertValidStatus(t *testing.T, client TailscaleClient) {

// This isn't really relevant for Self as it won't be in its own socket/wireguard.
// assert.Truef(t, status.Self.InMagicSock, "%q is not tracked by magicsock", client.Hostname())
// assert.Truef(t, status.Self.InEngine, "%q is not in in wireguard engine", client.Hostname())
// assert.Truef(t, status.Self.InEngine, "%q is not in wireguard engine", client.Hostname())

for _, peer := range status.Peer {
assert.NotEmptyf(t, peer.HostName, "peer (%s) of %q does not have HostName set, likely missing Hostinfo", peer.DNSName, client.Hostname())
@@ -311,7 +310,7 @@ func assertValidNetcheck(t *testing.T, client TailscaleClient) {
func assertCommandOutputContains(t *testing.T, c TailscaleClient, command []string, contains string) {
t.Helper()

_, err := backoff.Retry(context.Background(), func() (struct{}, error) {
_, err := backoff.Retry(t.Context(), func() (struct{}, error) {
stdout, stderr, err := c.Execute(command)
if err != nil {
return struct{}{}, fmt.Errorf("executing command, stdout: %q stderr: %q, err: %w", stdout, stderr, err)
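The assertCommandOutputContains hunk above changes only the context passed to backoff.Retry: t.Context() (added in Go 1.24) is cancelled when the test finishes or times out, so the retry loop cannot outlive its test the way a context.Background() loop could. A rough equivalent written as a plain polling loop, independent of the backoff library:

package example

import (
	"errors"
	"testing"
	"time"
)

// pollUntil retries fn every interval until it succeeds or the test's context
// is cancelled (test completion, -timeout, or interrupt).
func pollUntil(t *testing.T, interval time.Duration, fn func() error) error {
	t.Helper()
	ctx := t.Context() // tied to the lifetime of the test (Go 1.24+)
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		err := fn()
		if err == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return errors.Join(err, ctx.Err())
		case <-ticker.C:
		}
	}
}

func TestPollUntilSketch(t *testing.T) {
	start := time.Now()
	err := pollUntil(t, 10*time.Millisecond, func() error {
		if time.Since(start) < 30*time.Millisecond {
			return errors.New("not ready")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}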
@@ -492,6 +491,7 @@ func groupApprover(name string) policyv2.AutoApprover {
func tagApprover(name string) policyv2.AutoApprover {
return ptr.To(policyv2.Tag(name))
}

//
// // findPeerByHostname takes a hostname and a map of peers from status.Peer, and returns a *ipnstate.PeerStatus
// // if there is a peer with the given hostname. If no peer is found, nil is returned.