Test two namespaces

This expands the tests to verify two namespaces instead of only one.

It verifies some of the isolation between namespaces, and is prework for shared nodes testing.
Kristoffer Dalby 2021-09-20 20:18:28 +01:00
parent d68d201722
commit f905812afa

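As a rough illustration of the isolation this layout enables (a hypothetical helper, not part of this commit): each tailscale hostname is now prefixed with its namespace and tracked in a per-namespace map, so a test can assert that one namespace's node list never contains hostnames registered under another namespace. A minimal sketch, assuming the s.namespaces, headscale, and executeCommand names introduced in the diff below:

// checkNamespaceSeparation is a hypothetical isolation assertion:
// hostnames registered in one namespace must not appear in the node
// list of any other namespace.
func (s *IntegrationTestSuite) checkNamespaceSeparation() {
	for namespace := range s.namespaces {
		result, err := executeCommand(
			&headscale,
			[]string{"headscale", "-n", namespace, "nodes", "list"},
		)
		assert.Nil(s.T(), err)

		for other, scales := range s.namespaces {
			if other == namespace {
				continue
			}
			// Hostnames are namespace-prefixed, so any leak is visible by name.
			for hostname := range scales.tailscales {
				assert.NotContains(s.T(), result, hostname)
			}
		}
	}
}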

@@ -30,30 +30,49 @@ var (
 )

 var (
 	pool      dockertest.Pool
 	network   dockertest.Network
 	headscale dockertest.Resource
-	tailscaleCount int = 25
-	tailscales     map[string]dockertest.Resource
 )

 var tailscaleVersions = []string{"1.14.3", "1.12.3"}

+type TestNamespace struct {
+	count      int
+	tailscales map[string]dockertest.Resource
+}
+
 type IntegrationTestSuite struct {
 	suite.Suite
 	stats *suite.SuiteInformation
+	namespaces map[string]TestNamespace
 }

 func TestIntegrationTestSuite(t *testing.T) {
 	s := new(IntegrationTestSuite)

+	s.namespaces = map[string]TestNamespace{
+		"main": {
+			count:      20,
+			tailscales: make(map[string]dockertest.Resource),
+		},
+		"shared": {
+			count:      5,
+			tailscales: make(map[string]dockertest.Resource),
+		},
+	}
+
 	suite.Run(t, s)

 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
-	for _, tailscale := range tailscales {
-		if err := pool.Purge(&tailscale); err != nil {
-			log.Printf("Could not purge resource: %s\n", err)
+	for _, scales := range s.namespaces {
+		for _, tailscale := range scales.tailscales {
+			if err := pool.Purge(&tailscale); err != nil {
+				log.Printf("Could not purge resource: %s\n", err)
+			}
 		}
 	}
@@ -147,7 +166,7 @@ func dockerRestartPolicy(config *docker.HostConfig) {
 	}
 }

-func tailscaleContainer(identifier string, version string) (string, *dockertest.Resource) {
+func tailscaleContainer(namespace, identifier, version string) (string, *dockertest.Resource) {
 	tailscaleBuildOptions := &dockertest.BuildOptions{
 		Dockerfile: "Dockerfile.tailscale",
 		ContextDir: ".",
@@ -158,7 +177,7 @@ func tailscaleContainer(identifier string, version string) (string, *dockertest.
 			},
 		},
 	}
-	hostname := fmt.Sprintf("tailscale-%s-%s", strings.Replace(version, ".", "-", -1), identifier)
+	hostname := fmt.Sprintf("%s-tailscale-%s-%s", namespace, strings.Replace(version, ".", "-", -1), identifier)
 	tailscaleOptions := &dockertest.RunOptions{
 		Name:     hostname,
 		Networks: []*dockertest.Network{&network},
@@ -224,12 +243,13 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	fmt.Println("Created headscale container")

 	fmt.Println("Creating tailscale containers")
-	tailscales = make(map[string]dockertest.Resource)
-	for i := 0; i < tailscaleCount; i++ {
-		version := tailscaleVersions[i%len(tailscaleVersions)]
-		hostname, container := tailscaleContainer(fmt.Sprint(i), version)
-		tailscales[hostname] = *container
+	for namespace, scales := range s.namespaces {
+		for i := 0; i < scales.count; i++ {
+			version := tailscaleVersions[i%len(tailscaleVersions)]
+			hostname, container := tailscaleContainer(namespace, fmt.Sprint(i), version)
+			scales.tailscales[hostname] = *container
+		}
 	}

 	fmt.Println("Waiting for headscale to be ready")
@@ -250,35 +270,38 @@ func (s *IntegrationTestSuite) SetupSuite() {
 	}
 	fmt.Println("headscale container is ready")

-	fmt.Println("Creating headscale namespace")
-	result, err := executeCommand(
-		&headscale,
-		[]string{"headscale", "namespaces", "create", "test"},
-	)
-	assert.Nil(s.T(), err)
-
-	fmt.Println("Creating pre auth key")
-	authKey, err := executeCommand(
-		&headscale,
-		[]string{"headscale", "-n", "test", "preauthkeys", "create", "--reusable", "--expiration", "24h"},
-	)
-	assert.Nil(s.T(), err)
-
-	headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))
-
-	fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
-	for hostname, tailscale := range tailscales {
-		command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}
-
-		fmt.Println("Join command:", command)
-		fmt.Printf("Running join command for %s\n", hostname)
-		result, err = executeCommand(
-			&tailscale,
-			command,
-		)
-		fmt.Println("tailscale result: ", result)
-		assert.Nil(s.T(), err)
-		fmt.Printf("%s joined\n", hostname)
+	for namespace, scales := range s.namespaces {
+		fmt.Printf("Creating headscale namespace: %s\n", namespace)
+		result, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "namespaces", "create", namespace},
+		)
+		assert.Nil(s.T(), err)
+		fmt.Println("headscale create namespace result: ", result)
+
+		fmt.Printf("Creating pre auth key for %s\n", namespace)
+		authKey, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "-n", namespace, "preauthkeys", "create", "--reusable", "--expiration", "24h"},
+		)
+		assert.Nil(s.T(), err)
+
+		headscaleEndpoint := fmt.Sprintf("http://headscale:%s", headscale.GetPort("8080/tcp"))
+
+		fmt.Printf("Joining tailscale containers to headscale at %s\n", headscaleEndpoint)
+		for hostname, tailscale := range scales.tailscales {
+			command := []string{"tailscale", "up", "-login-server", headscaleEndpoint, "--authkey", strings.TrimSuffix(authKey, "\n"), "--hostname", hostname}

+			fmt.Println("Join command:", command)
+			fmt.Printf("Running join command for %s\n", hostname)
+			result, err := executeCommand(
+				&tailscale,
+				command,
+			)
+			fmt.Println("tailscale result: ", result)
+			assert.Nil(s.T(), err)
+			fmt.Printf("%s joined\n", hostname)
+		}
 	}

 	// The nodes need a bit of time to get their updated maps from headscale
@@ -294,109 +317,117 @@ func (s *IntegrationTestSuite) HandleStats(suiteName string, stats *suite.SuiteI
 }

 func (s *IntegrationTestSuite) TestListNodes() {
-	fmt.Println("Listing nodes")
-	result, err := executeCommand(
-		&headscale,
-		[]string{"headscale", "-n", "test", "nodes", "list"},
-	)
-	assert.Nil(s.T(), err)
-
-	fmt.Printf("List nodes: \n%s\n", result)
-
-	// Check that the correct number of hosts is present in the node list
-	lines := strings.Split(result, "\n")
-	assert.Equal(s.T(), len(tailscales), len(lines)-2)
-
-	for hostname := range tailscales {
-		assert.Contains(s.T(), result, hostname)
+	for namespace, scales := range s.namespaces {
+		fmt.Println("Listing nodes")
+		result, err := executeCommand(
+			&headscale,
+			[]string{"headscale", "-n", namespace, "nodes", "list"},
+		)
+		assert.Nil(s.T(), err)
+
+		fmt.Printf("List nodes: \n%s\n", result)
+
+		// Check that the correct number of hosts is present in the node list
+		lines := strings.Split(result, "\n")
+		assert.Equal(s.T(), len(scales.tailscales), len(lines)-2)
+
+		for hostname := range scales.tailscales {
+			assert.Contains(s.T(), result, hostname)
+		}
 	}
 }

 func (s *IntegrationTestSuite) TestGetIpAddresses() {
-	ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
-
-	for hostname := range tailscales {
-		s.T().Run(hostname, func(t *testing.T) {
-			ip := ips[hostname]
-
-			fmt.Printf("IP for %s: %s\n", hostname, ip)
-
-			// c.Assert(ip.Valid(), check.IsTrue)
-			assert.True(t, ip.Is4())
-			assert.True(t, ipPrefix.Contains(ip))
-
-			ips[hostname] = ip
-		})
+	for _, scales := range s.namespaces {
+		ipPrefix := netaddr.MustParseIPPrefix("100.64.0.0/10")
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname := range scales.tailscales {
+			s.T().Run(hostname, func(t *testing.T) {
+				ip := ips[hostname]
+
+				fmt.Printf("IP for %s: %s\n", hostname, ip)
+
+				// c.Assert(ip.Valid(), check.IsTrue)
+				assert.True(t, ip.Is4())
+				assert.True(t, ipPrefix.Contains(ip))
+
+				ips[hostname] = ip
+			})
+		}
 	}
 }

 func (s *IntegrationTestSuite) TestStatus() {
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
-
-	for hostname, tailscale := range tailscales {
-		s.T().Run(hostname, func(t *testing.T) {
-			command := []string{"tailscale", "status"}
-
-			fmt.Printf("Getting status for %s\n", hostname)
-			result, err := executeCommand(
-				&tailscale,
-				command,
-			)
-			assert.Nil(t, err)
-			// fmt.Printf("Status for %s: %s", hostname, result)
-
-			// Check if we have as many nodes in status
-			// as we have IPs/tailscales
-			lines := strings.Split(result, "\n")
-			assert.Equal(t, len(ips), len(lines)-1)
-			assert.Equal(t, len(tailscales), len(lines)-1)
-
-			// Check that all hosts are present in all hosts' status
-			for ipHostname, ip := range ips {
-				assert.Contains(t, result, ip.String())
-				assert.Contains(t, result, ipHostname)
-			}
-		})
-	}
+	for _, scales := range s.namespaces {
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname, tailscale := range scales.tailscales {
+			s.T().Run(hostname, func(t *testing.T) {
+				command := []string{"tailscale", "status"}
+
+				fmt.Printf("Getting status for %s\n", hostname)
+				result, err := executeCommand(
+					&tailscale,
+					command,
+				)
+				assert.Nil(t, err)
+				// fmt.Printf("Status for %s: %s", hostname, result)
+
+				// Check if we have as many nodes in status
+				// as we have IPs/tailscales
+				lines := strings.Split(result, "\n")
+				assert.Equal(t, len(ips), len(lines)-1)
+				assert.Equal(t, len(scales.tailscales), len(lines)-1)
+
+				// Check that all hosts are present in all hosts' status
+				for ipHostname, ip := range ips {
+					assert.Contains(t, result, ip.String())
+					assert.Contains(t, result, ipHostname)
+				}
+			})
+		}
+	}
 }

 func (s *IntegrationTestSuite) TestPingAllPeers() {
-	ips, err := getIPs()
-	assert.Nil(s.T(), err)
-
-	for hostname, tailscale := range tailscales {
-		for peername, ip := range ips {
-			s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
-				// We currently can't ping ourselves, so skip that.
-				if peername != hostname {
-					// We are only interested in "direct ping", which means
-					// we might need a couple more attempts before reaching the node.
-					command := []string{
-						"tailscale", "ping",
-						"--timeout=1s",
-						"--c=20",
-						"--until-direct=true",
-						ip.String(),
-					}
-
-					fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
-					result, err := executeCommand(
-						&tailscale,
-						command,
-					)
-					assert.Nil(t, err)
-					fmt.Printf("Result for %s: %s\n", hostname, result)
-					assert.Contains(t, result, "pong")
-				}
-			})
-		}
-	}
+	for _, scales := range s.namespaces {
+		ips, err := getIPs(scales.tailscales)
+		assert.Nil(s.T(), err)
+
+		for hostname, tailscale := range scales.tailscales {
+			for peername, ip := range ips {
+				s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
+					// We currently can't ping ourselves, so skip that.
+					if peername != hostname {
+						// We are only interested in "direct ping", which means
+						// we might need a couple more attempts before reaching the node.
+						command := []string{
+							"tailscale", "ping",
+							"--timeout=1s",
+							"--c=20",
+							"--until-direct=true",
+							ip.String(),
+						}
+
+						fmt.Printf("Pinging from %s (%s) to %s (%s)\n", hostname, ips[hostname], peername, ip)
+						result, err := executeCommand(
+							&tailscale,
+							command,
+						)
+						assert.Nil(t, err)
+						fmt.Printf("Result for %s: %s\n", hostname, result)
+						assert.Contains(t, result, "pong")
+					}
+				})
+			}
+		}
+	}
 }

-func getIPs() (map[string]netaddr.IP, error) {
+func getIPs(tailscales map[string]dockertest.Resource) (map[string]netaddr.IP, error) {
 	ips := make(map[string]netaddr.IP)
 	for hostname, tailscale := range tailscales {
 		command := []string{"tailscale", "ip"}