From 911c5bddce34b3474e4915fab5e1ed43fbe33978 Mon Sep 17 00:00:00 2001
From: Kristoffer Dalby
Date: Mon, 27 Jun 2022 11:56:37 +0000
Subject: [PATCH] Make saving logs from tests an option (default false)

We currently have a bit of flaky logic that prevents the Docker plugin
from cleaning up the containers if the tests or the setup fail fatally
or crash. This is due to a limitation in the save/passed stats handling.

This change puts log saving behind an environment variable
(HEADSCALE_INTEGRATION_SAVE_LOG, default false): by default the logs are
discarded and the containers are cleaned up "correctly" in the teardown
method.
---
 integration_common_test.go        | 31 ++++++++++++++++-
 integration_embedded_derp_test.go | 58 ++++++++++++++++++++++---------
 integration_test.go               | 56 +++++++++++++++++++++--------
 3 files changed, 113 insertions(+), 32 deletions(-)

diff --git a/integration_common_test.go b/integration_common_test.go
index f1c4e868..4ee2d3b3 100644
--- a/integration_common_test.go
+++ b/integration_common_test.go
@@ -6,7 +6,10 @@ package headscale
 import (
 	"bytes"
 	"encoding/json"
+	"errors"
 	"fmt"
+	"os"
+	"strconv"
 	"strings"
 	"time"
 
@@ -16,9 +19,13 @@ import (
 	"inet.af/netaddr"
 )
 
-const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+const (
+	DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
+)
 
 var (
+	errEnvVarEmpty = errors.New("getenv: environment variable empty")
+
 	IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
 	IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
 
@@ -283,3 +290,25 @@ func getMagicFQDN(
 
 	return hostnames, nil
 }
+
+func GetEnvStr(key string) (string, error) {
+	v := os.Getenv(key)
+	if v == "" {
+		return v, errEnvVarEmpty
+	}
+
+	return v, nil
+}
+
+func GetEnvBool(key string) (bool, error) {
+	s, err := GetEnvStr(key)
+	if err != nil {
+		return false, err
+	}
+	v, err := strconv.ParseBool(s)
+	if err != nil {
+		return false, err
+	}
+
+	return v, nil
+}
diff --git a/integration_embedded_derp_test.go b/integration_embedded_derp_test.go
index 5f388694..d8918f4e 100644
--- a/integration_embedded_derp_test.go
+++ b/integration_embedded_derp_test.go
@@ -40,41 +40,50 @@ type IntegrationDERPTestSuite struct {
 	pool      dockertest.Pool
 	networks  map[int]dockertest.Network // so we keep the containers isolated
 	headscale dockertest.Resource
+	saveLogs  bool
 
 	tailscales    map[string]dockertest.Resource
 	joinWaitGroup sync.WaitGroup
 }
 
 func TestDERPIntegrationTestSuite(t *testing.T) {
+	saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG")
+	if err != nil {
+		saveLogs = false
+	}
+
 	s := new(IntegrationDERPTestSuite)
 
 	s.tailscales = make(map[string]dockertest.Resource)
 	s.networks = make(map[int]dockertest.Network)
+	s.saveLogs = saveLogs
 
 	suite.Run(t, s)
 
 	// HandleStats, which allows us to check if we passed and save logs
 	// is called after TearDown, so we cannot tear down containers before
 	// we have potentially saved the logs.
- for _, tailscale := range s.tailscales { - if err := s.pool.Purge(&tailscale); err != nil { + if s.saveLogs { + for _, tailscale := range s.tailscales { + if err := s.pool.Purge(&tailscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) + } + } + + if !s.stats.Passed() { + err := s.saveLog(&s.headscale, "test_output") + if err != nil { + log.Printf("Could not save log: %s\n", err) + } + } + if err := s.pool.Purge(&s.headscale); err != nil { log.Printf("Could not purge resource: %s\n", err) } - } - if !s.stats.Passed() { - err := s.saveLog(&s.headscale, "test_output") - if err != nil { - log.Printf("Could not save log: %s\n", err) - } - } - if err := s.pool.Purge(&s.headscale); err != nil { - log.Printf("Could not purge resource: %s\n", err) - } - - for _, network := range s.networks { - if err := network.Close(); err != nil { - log.Printf("Could not close network: %s\n", err) + for _, network := range s.networks { + if err := network.Close(); err != nil { + log.Printf("Could not close network: %s\n", err) + } } } } @@ -290,6 +299,23 @@ func (s *IntegrationDERPTestSuite) tailscaleContainer( } func (s *IntegrationDERPTestSuite) TearDownSuite() { + if !s.saveLogs { + for _, tailscale := range s.tailscales { + if err := s.pool.Purge(&tailscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) + } + } + + if err := s.pool.Purge(&s.headscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) + } + + for _, network := range s.networks { + if err := network.Close(); err != nil { + log.Printf("Could not close network: %s\n", err) + } + } + } } func (s *IntegrationDERPTestSuite) HandleStats( diff --git a/integration_test.go b/integration_test.go index 18f28b28..22cc0ae5 100644 --- a/integration_test.go +++ b/integration_test.go @@ -36,6 +36,7 @@ type IntegrationTestSuite struct { pool dockertest.Pool network dockertest.Network headscale dockertest.Resource + saveLogs bool namespaces map[string]TestNamespace @@ -43,6 +44,11 @@ type IntegrationTestSuite struct { } func TestIntegrationTestSuite(t *testing.T) { + saveLogs, err := GetEnvBool("HEADSCALE_INTEGRATION_SAVE_LOG") + if err != nil { + saveLogs = false + } + s := new(IntegrationTestSuite) s.namespaces = map[string]TestNamespace{ @@ -55,32 +61,35 @@ func TestIntegrationTestSuite(t *testing.T) { tailscales: make(map[string]dockertest.Resource), }, } + s.saveLogs = saveLogs suite.Run(t, s) // HandleStats, which allows us to check if we passed and save logs // is called after TearDown, so we cannot tear down containers before // we have potentially saved the logs. 
- for _, scales := range s.namespaces { - for _, tailscale := range scales.tailscales { - if err := s.pool.Purge(&tailscale); err != nil { - log.Printf("Could not purge resource: %s\n", err) + if s.saveLogs { + for _, scales := range s.namespaces { + for _, tailscale := range scales.tailscales { + if err := s.pool.Purge(&tailscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) + } } } - } - if !s.stats.Passed() { - err := s.saveLog(&s.headscale, "test_output") - if err != nil { - log.Printf("Could not save log: %s\n", err) + if !s.stats.Passed() { + err := s.saveLog(&s.headscale, "test_output") + if err != nil { + log.Printf("Could not save log: %s\n", err) + } + } + if err := s.pool.Purge(&s.headscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) } - } - if err := s.pool.Purge(&s.headscale); err != nil { - log.Printf("Could not purge resource: %s\n", err) - } - if err := s.network.Close(); err != nil { - log.Printf("Could not close network: %s\n", err) + if err := s.network.Close(); err != nil { + log.Printf("Could not close network: %s\n", err) + } } } @@ -338,6 +347,23 @@ func (s *IntegrationTestSuite) SetupSuite() { } func (s *IntegrationTestSuite) TearDownSuite() { + if !s.saveLogs { + for _, scales := range s.namespaces { + for _, tailscale := range scales.tailscales { + if err := s.pool.Purge(&tailscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) + } + } + } + + if err := s.pool.Purge(&s.headscale); err != nil { + log.Printf("Could not purge resource: %s\n", err) + } + + if err := s.network.Close(); err != nil { + log.Printf("Could not close network: %s\n", err) + } + } } func (s *IntegrationTestSuite) HandleStats(
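For reference, the HEADSCALE_INTEGRATION_SAVE_LOG switch is read through the new GetEnvBool helper, which delegates to strconv.ParseBool, so only the values ParseBool accepts ("1", "t", "T", "TRUE", "true", "True", "0", "f", "F", "FALSE", "false", "False") are recognised; an unset, empty, or unparsable value falls back to the default of false, because both test suites ignore the error and leave saveLogs as false. The standalone Go sketch below condenses that behaviour; the saveLogsFromEnv function and the main program are illustrative only and do not appear in the patch.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// saveLogsFromEnv mirrors the GetEnvBool-plus-fallback pattern from the patch:
// read HEADSCALE_INTEGRATION_SAVE_LOG and fall back to false when the variable
// is unset, empty, or not a value strconv.ParseBool accepts.
// (saveLogsFromEnv is an illustrative name; it does not exist in the patch.)
func saveLogsFromEnv() bool {
	v := os.Getenv("HEADSCALE_INTEGRATION_SAVE_LOG")
	if v == "" {
		return false
	}

	saveLogs, err := strconv.ParseBool(v)
	if err != nil {
		return false
	}

	return saveLogs
}

func main() {
	// Prints "true" when run with HEADSCALE_INTEGRATION_SAVE_LOG=1 (or "true",
	// "t", "T", etc.), and "false" for any other or missing value.
	fmt.Println(saveLogsFromEnv())
}

In practice, running the integration suites with the variable set to one of the true values keeps the old behaviour: the containers are purged after suite.Run returns and the headscale log is saved when the run did not pass. Leaving it unset gives the new default, where no logs are saved and cleanup happens in TearDownSuite.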