General fixups discovered by checking errors

There were a lot of tests that threw errors and did not actually pass
all the way through, because we didn't check everything. This commit
should fix all of these cases.

Signed-off-by: Kristoffer Dalby <kristoffer@tailscale.com>
Kristoffer Dalby 2023-08-31 18:37:18 +02:00 committed by Kristoffer Dalby
parent f8a58aa15b
commit 1766e6b5df
11 changed files with 134 additions and 64 deletions
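The common thread in these fixes is checking results that were previously ignored. Condensed from the OIDC login hunk further down (errStatusCodeNotOK is a sentinel error the diff references but does not define here), the added pattern looks like this:

resp, err := httpClient.Do(req)
if err != nil {
	return err
}
if resp.StatusCode != http.StatusOK {
	log.Printf("%s response code of oidc login request was %s", c.Hostname(), resp.Status)
	return errStatusCodeNotOK
}
defer resp.Body.Close()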

View File

@ -1,7 +1,7 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSHOneUserAllToAll
name: Integration Test v2 - TestSSHOneUserToAll
on: [pull_request]
@ -10,7 +10,7 @@ concurrency:
cancel-in-progress: true
jobs:
TestSSHOneUserAllToAll:
TestSSHOneUserToAll:
runs-on: ubuntu-latest
steps:
@ -34,7 +34,7 @@ jobs:
integration_test/
config-example.yaml
- name: Run TestSSHOneUserAllToAll
- name: Run TestSSHOneUserToAll
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
@ -50,7 +50,7 @@ jobs:
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSHOneUserAllToAll$"
-run "^TestSSHOneUserToAll$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'

View File

@ -1,7 +1,7 @@
# DO NOT EDIT, generated with cmd/gh-action-integration-generator/main.go
# To regenerate, run "go generate" in cmd/gh-action-integration-generator/
name: Integration Test v2 - TestSSUserOnlyIsolation
name: Integration Test v2 - TestSSHUserOnlyIsolation
on: [pull_request]
@ -10,7 +10,7 @@ concurrency:
cancel-in-progress: true
jobs:
TestSSUserOnlyIsolation:
TestSSHUserOnlyIsolation:
runs-on: ubuntu-latest
steps:
@ -34,7 +34,7 @@ jobs:
integration_test/
config-example.yaml
- name: Run TestSSUserOnlyIsolation
- name: Run TestSSHUserOnlyIsolation
if: steps.changed-files.outputs.any_changed == 'true'
run: |
nix develop --command -- docker run \
@ -50,7 +50,7 @@ jobs:
-failfast \
-timeout 120m \
-parallel 1 \
-run "^TestSSUserOnlyIsolation$"
-run "^TestSSHUserOnlyIsolation$"
- uses: actions/upload-artifact@v3
if: always() && steps.changed-files.outputs.any_changed == 'true'

View File

@ -30,6 +30,7 @@ linters:
- exhaustruct
- nolintlint
- musttag # causes issues with imported libs
- depguard
# deprecated
- structcheck # replaced by unused

View File

@ -480,6 +480,8 @@ func GetDNSConfig() (*tailcfg.DNSConfig, string) {
baseDomain = "headscale.net" // does not really matter when MagicDNS is not enabled
}
log.Trace().Interface("dns_config", dnsConfig).Msg("DNS configuration loaded")
return dnsConfig, baseDomain
}

View File

@ -61,6 +61,10 @@ func aclScenario(
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{
// Alpine containers don't have ip6tables set up, which causes
// tailscaled to stop configuring the wgengine, causing it
// to not configure DNS.
tsic.WithNetfilter("off"),
tsic.WithDockerEntrypoint([]string{
"/bin/sh",
"-c",

View File

@ -21,6 +21,7 @@ import (
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/samber/lo"
"github.com/stretchr/testify/assert"
)
const (
@ -90,6 +91,7 @@ func TestOIDCAuthenticationPingAll(t *testing.T) {
t.Logf("%d successful pings out of %d", success, len(allClients)*len(allIps))
}
// This test is really flaky.
func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
@ -99,13 +101,15 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
baseScenario, err := NewScenario()
assertNoErr(t, err)
baseScenario.pool.MaxWait = 5 * time.Minute
scenario := AuthOIDCScenario{
Scenario: baseScenario,
}
defer scenario.Shutdown()
spec := map[string]int{
"user1": len(MustTestVersions),
"user1": 3,
}
oidcConfig, err := scenario.runMockOIDC(shortAccessTTL)
@ -143,9 +147,13 @@ func TestOIDCExpireNodesBasedOnTokenExpiry(t *testing.T) {
success := pingAllHelper(t, allClients, allAddrs)
t.Logf("%d successful pings out of %d (before expiry)", success, len(allClients)*len(allIps))
// await all nodes being logged out after OIDC token expiry
err = scenario.WaitForTailscaleLogout()
assertNoErrLogout(t, err)
// This is not great, but this sadly is a time-dependent test, so the
// safe thing to do is wait out the whole TTL before checking whether
// the clients have logged out. The Wait function can't do it itself,
// as it has an upper bound of 1 min.
time.Sleep(shortAccessTTL)
assertTailscaleNodesLogout(t, allClients)
}
func (s *AuthOIDCScenario) CreateHeadscaleEnv(
@ -296,13 +304,15 @@ func (s *AuthOIDCScenario) runTailscaleUp(
log.Printf("%s login url: %s\n", c.Hostname(), loginURL.String())
if err := s.pool.Retry(func() error {
log.Printf("%s logging in with url", c.Hostname())
httpClient := &http.Client{Transport: insecureTransport}
ctx := context.Background()
req, _ := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
resp, err := httpClient.Do(req)
if err != nil {
log.Printf(
"%s failed to get login url %s: %s",
"%s failed to login using url %s: %s",
c.Hostname(),
loginURL,
err,
@ -311,6 +321,12 @@ func (s *AuthOIDCScenario) runTailscaleUp(
return err
}
if resp.StatusCode != http.StatusOK {
log.Printf("%s response code of oidc login request was %s", c.Hostname(), resp.Status)
return errStatusCodeNotOK
}
defer resp.Body.Close()
_, err = io.ReadAll(resp.Body)
@ -320,6 +336,11 @@ func (s *AuthOIDCScenario) runTailscaleUp(
return err
}
return nil
}); err != nil {
return err
}
log.Printf("Finished request for %s to join tailnet", c.Hostname())
return nil
@ -357,3 +378,14 @@ func (s *AuthOIDCScenario) Shutdown() {
s.Scenario.Shutdown()
}
func assertTailscaleNodesLogout(t *testing.T, clients []TailscaleClient) {
t.Helper()
for _, client := range clients {
status, err := client.Status()
assertNoErr(t, err)
assert.Equal(t, "NeedsLogin", status.BackendState)
}
}

View File

@ -78,14 +78,14 @@ var (
)
// MustTestVersions is the minimum set of versions we should test.
// At the moment, this is arbitrarily choosen as:
// At the moment, this is arbitrarily chosen as:
//
// - Two unstable (HEAD and unstable)
// - Two latest versions
// - Two oldest versions
// - Two oldest versions.
MustTestVersions = append(
tailscaleVersions2021[0:4],
tailscaleVersions2019[len(tailscaleVersions2019)-2:len(tailscaleVersions2019)]...,
tailscaleVersions2019[len(tailscaleVersions2019)-2:]...,
)
)
@ -573,7 +573,7 @@ func (s *Scenario) WaitForTailscaleLogout() error {
for _, client := range user.Clients {
c := client
user.syncWaitGroup.Go(func() error {
return c.WaitForLogout()
return c.WaitForNeedsLogin()
})
}
if err := user.syncWaitGroup.Wait(); err != nil {
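WaitForLogout is replaced here by the pre-existing WaitForNeedsLogin, whose implementation is not shown in this diff. A plausible sketch, assuming it mirrors the removed WaitForLogout (see the tsic changes below) but keys off the BackendState that assertTailscaleNodesLogout checks:

// Sketch only; the real implementation is not part of this commit's diff.
func (t *TailscaleInContainer) WaitForNeedsLogin() error {
	return t.pool.Retry(func() error {
		status, err := t.Status()
		if err != nil {
			return errTailscaleStatus(t.hostname, err)
		}
		// "NeedsLogin" is the state the tests assert after expiry/logout.
		if status.BackendState == "NeedsLogin" {
			return nil
		}
		return errTailscaledNotReadyForLogin
	})
}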

View File

@ -2,6 +2,7 @@ package integration
import (
"fmt"
"log"
"strings"
"testing"
"time"
@ -53,10 +54,16 @@ func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Sc
err = scenario.CreateHeadscaleEnv(spec,
[]tsic.Option{
tsic.WithSSH(),
// Alpine containers don't have ip6tables set up, which causes
// tailscaled to stop configuring the wgengine, causing it
// to not configure DNS.
tsic.WithNetfilter("off"),
tsic.WithDockerEntrypoint([]string{
"/bin/sh",
"-c",
"/bin/sleep 3 ; apk add openssh ; update-ca-certificates ; tailscaled --tun=tsdev",
"/bin/sleep 3 ; apk add openssh ; adduser ssh-it-user ; update-ca-certificates ; tailscaled --tun=tsdev",
}),
tsic.WithDockerWorkdir("/"),
},
@ -77,7 +84,7 @@ func sshScenario(t *testing.T, policy *policy.ACLPolicy, clientsPerUser int) *Sc
return scenario
}
func TestSSHOneUserAllToAll(t *testing.T) {
func TestSSHOneUserToAll(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
@ -97,7 +104,7 @@ func TestSSHOneUserAllToAll(t *testing.T) {
{
Action: "accept",
Sources: []string{"group:integration-test"},
Destinations: []string{"group:integration-test"},
Destinations: []string{"*"},
Users: []string{"ssh-it-user"},
},
},
@ -109,13 +116,19 @@ func TestSSHOneUserAllToAll(t *testing.T) {
allClients, err := scenario.ListTailscaleClients()
assertNoErrListClients(t, err)
user1Clients, err := scenario.ListTailscaleClients("user1")
assertNoErrListClients(t, err)
user2Clients, err := scenario.ListTailscaleClients("user2")
assertNoErrListClients(t, err)
err = scenario.WaitForTailscaleSync()
assertNoErrSync(t, err)
_, err = scenario.ListTailscaleClientsFQDNs()
assertNoErrListFQDN(t, err)
for _, client := range allClients {
for _, client := range user1Clients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
@ -124,6 +137,16 @@ func TestSSHOneUserAllToAll(t *testing.T) {
assertSSHHostname(t, client, peer)
}
}
for _, client := range user2Clients {
for _, peer := range allClients {
if client.Hostname() == peer.Hostname() {
continue
}
assertSSHPermissionDenied(t, client, peer)
}
}
}
func TestSSHMultipleUsersAllToAll(t *testing.T) {
@ -270,7 +293,7 @@ func TestSSHIsBlockedInACL(t *testing.T) {
}
}
func TestSSUserOnlyIsolation(t *testing.T) {
func TestSSHUserOnlyIsolation(t *testing.T) {
IntegrationSkip(t)
t.Parallel()
@ -365,11 +388,14 @@ func doSSH(t *testing.T, client TailscaleClient, peer TailscaleClient) (string,
peerFQDN, _ := peer.FQDN()
command := []string{
"ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
"/usr/bin/ssh", "-o StrictHostKeyChecking=no", "-o ConnectTimeout=1",
fmt.Sprintf("%s@%s", "ssh-it-user", peerFQDN),
"'hostname'",
}
log.Printf("Running from %s to %s", client.Hostname(), peer.Hostname())
log.Printf("Command: %s", strings.Join(command, " "))
return retry(10, 1*time.Second, func() (string, string, error) {
return client.Execute(command)
})
@ -381,27 +407,28 @@ func assertSSHHostname(t *testing.T, client TailscaleClient, peer TailscaleClien
result, _, err := doSSH(t, client, peer)
assertNoErr(t, err)
assert.Contains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
assertContains(t, peer.ID(), strings.ReplaceAll(result, "\n", ""))
}
func assertSSHPermissionDenied(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, stderr, err := doSSH(t, client, peer)
assert.Error(t, err)
result, stderr, _ := doSSH(t, client, peer)
assert.Empty(t, result)
assert.Contains(t, stderr, "Permission denied (tailscale)")
assertContains(t, stderr, "Permission denied (tailscale)")
}
func assertSSHTimeout(t *testing.T, client TailscaleClient, peer TailscaleClient) {
t.Helper()
result, stderr, err := doSSH(t, client, peer)
assertNoErr(t, err)
result, stderr, _ := doSSH(t, client, peer)
assert.Empty(t, result)
assert.Contains(t, stderr, "Connection timed out")
if !strings.Contains(stderr, "Connection timed out") &&
!strings.Contains(stderr, "Operation timed out") {
t.Fatalf("connection did not time out")
}
}
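doSSH wraps client.Execute in a retry helper that is not part of this diff. From the call site, its shape is roughly the following (a sketch under that assumption):

// retry runs fn up to attempts times, sleeping wait between failures,
// and returns the last stdout/stderr/error triple it saw.
func retry(attempts int, wait time.Duration, fn func() (string, string, error)) (string, string, error) {
	var stdout, stderr string
	var err error
	for i := 0; i < attempts; i++ {
		stdout, stderr, err = fn()
		if err == nil {
			return stdout, stderr, nil
		}
		time.Sleep(wait)
	}
	return stdout, stderr, err
}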

View File

@ -26,7 +26,6 @@ type TailscaleClient interface {
Status() (*ipnstate.Status, error)
WaitForNeedsLogin() error
WaitForRunning() error
WaitForLogout() error
WaitForPeers(expected int) error
Ping(hostnameOrIP string, opts ...tsic.PingOption) error
Curl(url string, opts ...tsic.CurlOption) (string, error)

View File

@ -35,7 +35,6 @@ var (
errTailscaleCannotUpWithoutAuthkey = errors.New("cannot up without authkey")
errTailscaleNotConnected = errors.New("tailscale not connected")
errTailscaledNotReadyForLogin = errors.New("tailscaled not ready for login")
errTailscaleNotLoggedOut = errors.New("tailscale not logged out")
)
func errTailscaleStatus(hostname string, err error) error {
@ -64,6 +63,7 @@ type TailscaleInContainer struct {
withEntrypoint []string
withExtraHosts []string
workdir string
netfilter string
}
// Option represents optional settings that can be given to a
@ -148,6 +148,15 @@ func WithDockerEntrypoint(args []string) Option {
}
}
// WithNetfilter configures Tailscale's --netfilter-mode parameter,
// allowing us to turn off modifying ip[6]tables/nftables.
// It takes one of: "on", "off", "nodivert".
func WithNetfilter(state string) Option {
return func(tsic *TailscaleInContainer) {
tsic.netfilter = state
}
}
// New returns a new TailscaleInContainer instance.
func New(
pool *dockertest.Pool,
@ -340,6 +349,10 @@ func (t *TailscaleInContainer) Login(
command = append(command, "--ssh")
}
if t.netfilter != "" {
command = append(command, "--netfilter-mode="+t.netfilter)
}
if len(t.withTags) > 0 {
command = append(command,
fmt.Sprintf(`--advertise-tags=%s`, strings.Join(t.withTags, ",")),
@ -513,22 +526,6 @@ func (t *TailscaleInContainer) WaitForRunning() error {
})
}
// WaitForLogout blocks until the Tailscale instance has logged out.
func (t *TailscaleInContainer) WaitForLogout() error {
return fmt.Errorf("%s err: %w", t.hostname, t.pool.Retry(func() error {
status, err := t.Status()
if err != nil {
return errTailscaleStatus(t.hostname, err)
}
if status.CurrentTailnet == nil {
return nil
}
return errTailscaleNotLoggedOut
}))
}
// WaitForPeers blocks until the expected number of peers is present in the
// Peer list of the Tailscale instance.
func (t *TailscaleInContainer) WaitForPeers(expected int) error {
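The new WithNetfilter option is what the ACL and SSH scenarios above use to work around Alpine's missing ip6tables. A typical call, abbreviated from the scenario setup earlier in this diff:

err = scenario.CreateHeadscaleEnv(spec,
	[]tsic.Option{
		// Leave ip[6]tables/nftables alone so tailscaled keeps
		// configuring wgengine (and therefore DNS) on Alpine.
		tsic.WithNetfilter("off"),
	},
	// ... remaining options elided ...
)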

View File

@ -1,6 +1,7 @@
package integration
import (
"strings"
"testing"
"time"
@ -59,6 +60,13 @@ func assertNoErrLogout(t *testing.T, err error) {
assertNoErrf(t, "failed to log out tailscale nodes: %s", err)
}
func assertContains(t *testing.T, str, subStr string) {
t.Helper()
if !strings.Contains(str, subStr) {
t.Fatalf("%#v does not contain %#v", str, subStr)
}
}
func pingAllHelper(t *testing.T, clients []TailscaleClient, addrs []string) int {
t.Helper()
success := 0