package integration

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/http/cookiejar"
	"net/netip"
	"net/url"
	"os"
	"slices"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
	"github.com/juanfont/headscale/hscontrol/capver"
	"github.com/juanfont/headscale/hscontrol/types"
	"github.com/juanfont/headscale/hscontrol/util"
	"github.com/juanfont/headscale/integration/dockertestutil"
	"github.com/juanfont/headscale/integration/dsic"
	"github.com/juanfont/headscale/integration/hsic"
	"github.com/juanfont/headscale/integration/integrationutil"
	"github.com/juanfont/headscale/integration/tsic"
	"github.com/oauth2-proxy/mockoidc"
	"github.com/ory/dockertest/v3"
	"github.com/ory/dockertest/v3/docker"
	"github.com/puzpuzpuz/xsync/v4"
	"github.com/samber/lo"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	xmaps "golang.org/x/exp/maps"
	"golang.org/x/sync/errgroup"
	"tailscale.com/envknob"
	"tailscale.com/util/mak"
	"tailscale.com/util/multierr"
)

const (
	scenarioHashLength = 6
)

var usePostgresForTest = envknob.Bool("HEADSCALE_INTEGRATION_POSTGRES")

var (
	errNoHeadscaleAvailable = errors.New("no headscale available")
	errNoUserAvailable      = errors.New("no user available")
	errNoClientFound        = errors.New("client not found")

	// AllVersions represents a list of Tailscale versions the suite
	// uses to test compatibility with the ControlServer.
	//
	// The list contains two special cases, "head" and "unstable", which
	// point to the current tip of Tailscale's main branch and the latest
	// released unstable version.
	//
	// The rest of the versions represent Tailscale releases that can be
	// found in Tailscale's apt repository.
	AllVersions = append([]string{"head", "unstable"}, capver.TailscaleLatestMajorMinor(capver.SupportedMajorMinorVersions, true)...)

	// MustTestVersions is the minimum set of versions we should test.
	// At the moment, this is arbitrarily chosen as:
	//
	// - Two unstable (HEAD and unstable)
	// - Two latest versions
	// - Two oldest supported versions.
	MustTestVersions = append(
		AllVersions[0:4],
		AllVersions[len(AllVersions)-2:]...,
	)
)
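
// For illustration only: if AllVersions happened to be, say,
//
//	[]string{"head", "unstable", "1.80", "1.78", "1.76", "1.74", "1.72"}
//
// then MustTestVersions would take the first four entries and the last two,
// yielding "head", "unstable", "1.80", "1.78", "1.74" and "1.72". The real
// contents depend on capver.SupportedMajorMinorVersions at build time.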

// User represents a User in the ControlServer and a map of TailscaleClients
// associated with the User.
type User struct {
	Clients map[string]TailscaleClient

	createWaitGroup errgroup.Group
	joinWaitGroup   errgroup.Group
	syncWaitGroup   errgroup.Group
}

// Scenario is a representation of an environment with one ControlServer and
// one or more Users and their associated TailscaleClients.
// A Scenario is intended to simplify setting up a new testcase for testing
// a ControlServer with TailscaleClients.
// TODO(kradalby): make control server configurable, test correctness with Tailscale SaaS.
type Scenario struct {
	// TODO(kradalby): support multiple headscales for later, currently only
	// use one.
	controlServers *xsync.MapOf[string, ControlServer]
	derpServers    []*dsic.DERPServerInContainer

	users map[string]*User

	pool          *dockertest.Pool
	networks      map[string]*dockertest.Network
	mockOIDC      scenarioOIDC
	extraServices map[string][]*dockertest.Resource

	mu sync.Mutex

	spec          ScenarioSpec
	userToNetwork map[string]*dockertest.Network

	testHashPrefix     string
	testDefaultNetwork string
}

// ScenarioSpec describes the users, nodes, and network topology to
// set up for a given scenario.
type ScenarioSpec struct {
	// Users is a list of usernames that will be created.
	// Each created user will get nodes equivalent to NodesPerUser.
	Users []string

	// NodesPerUser is how many nodes should be attached to each user.
	NodesPerUser int

	// Networks, if set, is the separate Docker networks that should be
	// created and a list of the users that should be placed in those networks.
	// If not set, a single network will be created and all users+nodes will be
	// added there.
	// Please note that Docker networks are not necessarily routable and
	// connections between them might fall back to DERP.
	Networks map[string][]string

	// ExtraService, if set, is a map of network to additional
	// container services that should be set up. These container services
	// typically don't run Tailscale, e.g. a web service to test a subnet router.
	ExtraService map[string][]extraServiceFunc

	// Versions is a specific list of versions to use for the test.
	Versions []string

	// OIDCUsers, if populated, will start a Mock OIDC server and populate
	// the user login stack with the given users.
	// If NodesPerUser is set, it should align with this list to ensure
	// the correct users are logged in.
	// This is because the MockOIDC server can only serve login
	// requests based on a queue it has been given on startup.
	// We currently only populate it with one login request per user.
	OIDCUsers     []mockoidc.MockUser
	OIDCAccessTTL time.Duration

	MaxWait time.Duration
}
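
// A minimal sketch of how a test might fill in a ScenarioSpec; the values
// here are illustrative only and not taken from any particular test:
//
//	spec := ScenarioSpec{
//		Users:        []string{"user1", "user2"},
//		NodesPerUser: 2,
//	}
//	scenario, err := NewScenario(spec)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer scenario.ShutdownAssertNoPanics(t)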

func (s *Scenario) prefixedNetworkName(name string) string {
	return s.testHashPrefix + "-" + name
}

// NewScenario creates a test Scenario which can be used to bootstrap a ControlServer with
// a set of Users and TailscaleClients.
func NewScenario(spec ScenarioSpec) (*Scenario, error) {
	pool, err := dockertest.NewPool("")
	if err != nil {
		return nil, fmt.Errorf("could not connect to docker: %w", err)
	}

	// Opportunity to clean up unreferenced networks.
	// This might be a no-op, but it is worth a try as we sometimes
	// don't clean up nicely after ourselves.
	dockertestutil.CleanUnreferencedNetworks(pool)
	dockertestutil.CleanImagesInCI(pool)

	if spec.MaxWait == 0 {
		pool.MaxWait = dockertestMaxWait()
	} else {
		pool.MaxWait = spec.MaxWait
	}

	testHashPrefix := "hs-" + util.MustGenerateRandomStringDNSSafe(scenarioHashLength)
	s := &Scenario{
		controlServers: xsync.NewMapOf[string, ControlServer](),
		users:          make(map[string]*User),

		pool: pool,
		spec: spec,

		testHashPrefix:     testHashPrefix,
		testDefaultNetwork: testHashPrefix + "-default",
	}

	var userToNetwork map[string]*dockertest.Network
	if len(spec.Networks) != 0 {
		for name, users := range s.spec.Networks {
			networkName := testHashPrefix + "-" + name
			network, err := s.AddNetwork(networkName)
			if err != nil {
				return nil, err
			}

			for _, user := range users {
				if n2, ok := userToNetwork[user]; ok {
					return nil, fmt.Errorf("users can only have nodes placed in one network: %s into %s but already in %s", user, network.Network.Name, n2.Network.Name)
				}
				mak.Set(&userToNetwork, user, network)
			}
		}
	} else {
		_, err := s.AddNetwork(s.testDefaultNetwork)
		if err != nil {
			return nil, err
		}
	}

	for network, extras := range spec.ExtraService {
		for _, extra := range extras {
			svc, err := extra(s, network)
			if err != nil {
				return nil, err
			}
			mak.Set(&s.extraServices, s.prefixedNetworkName(network), append(s.extraServices[s.prefixedNetworkName(network)], svc))
		}
	}

	s.userToNetwork = userToNetwork

	if len(spec.OIDCUsers) != 0 {
		ttl := defaultAccessTTL
		if spec.OIDCAccessTTL != 0 {
			ttl = spec.OIDCAccessTTL
		}
		err = s.runMockOIDC(ttl, spec.OIDCUsers)
		if err != nil {
			return nil, err
		}
	}

	return s, nil
}

func (s *Scenario) AddNetwork(name string) (*dockertest.Network, error) {
	network, err := dockertestutil.GetFirstOrCreateNetwork(s.pool, name)
	if err != nil {
		return nil, fmt.Errorf("failed to create or get network: %w", err)
	}

	// We run the test suite in a docker container that calls a couple of endpoints for
	// readiness checks. This ensures that we can run the tests with individual networks
	// and have the client reach the different containers.
	// TODO(kradalby): Can the test-suite be renamed so we can have multiple?
	err = dockertestutil.AddContainerToNetwork(s.pool, network, "headscale-test-suite")
	if err != nil {
		return nil, fmt.Errorf("failed to add test suite container to network: %w", err)
	}

	mak.Set(&s.networks, name, network)

	return network, nil
}

func (s *Scenario) Networks() []*dockertest.Network {
	if len(s.networks) == 0 {
		panic("Scenario.Networks called with empty network list")
	}

	return xmaps.Values(s.networks)
}

func (s *Scenario) Network(name string) (*dockertest.Network, error) {
	net, ok := s.networks[s.prefixedNetworkName(name)]
	if !ok {
		return nil, fmt.Errorf("no network named: %s", name)
	}

	return net, nil
}

func (s *Scenario) SubnetOfNetwork(name string) (*netip.Prefix, error) {
	net, ok := s.networks[s.prefixedNetworkName(name)]
	if !ok {
		return nil, fmt.Errorf("no network named: %s", name)
	}

	if len(net.Network.IPAM.Config) == 0 {
		return nil, fmt.Errorf("no IPAM config found in network: %s", name)
	}

	pref, err := netip.ParsePrefix(net.Network.IPAM.Config[0].Subnet)
	if err != nil {
		return nil, err
	}

	return &pref, nil
}

func (s *Scenario) Services(name string) ([]*dockertest.Resource, error) {
	res, ok := s.extraServices[s.prefixedNetworkName(name)]
	if !ok {
		return nil, fmt.Errorf("no services found for network: %s", name)
	}

	return res, nil
}

func (s *Scenario) ShutdownAssertNoPanics(t *testing.T) {
	defer dockertestutil.CleanUnreferencedNetworks(s.pool)
	defer dockertestutil.CleanImagesInCI(s.pool)

	s.controlServers.Range(func(_ string, control ControlServer) bool {
		stdoutPath, stderrPath, err := control.Shutdown()
		if err != nil {
			log.Printf(
				"Failed to shut down control: %s",
				fmt.Errorf("failed to tear down control: %w", err),
			)
		}

		if t != nil {
			stdout, err := os.ReadFile(stdoutPath)
			require.NoError(t, err)
			assert.NotContains(t, string(stdout), "panic")

			stderr, err := os.ReadFile(stderrPath)
			require.NoError(t, err)
			assert.NotContains(t, string(stderr), "panic")
		}

		return true
	})

	s.mu.Lock()
	for userName, user := range s.users {
		for _, client := range user.Clients {
			log.Printf("removing client %s in user %s", client.Hostname(), userName)
			stdoutPath, stderrPath, err := client.Shutdown()
			if err != nil {
				log.Printf("failed to tear down client: %s", err)
			}

			if t != nil {
				stdout, err := os.ReadFile(stdoutPath)
				require.NoError(t, err)
				assert.NotContains(t, string(stdout), "panic")

				stderr, err := os.ReadFile(stderrPath)
				require.NoError(t, err)
				assert.NotContains(t, string(stderr), "panic")
			}
		}
	}
	s.mu.Unlock()

	for _, derp := range s.derpServers {
		err := derp.Shutdown()
		if err != nil {
			log.Printf("failed to tear down derp server: %s", err)
		}
	}

	for _, svcs := range s.extraServices {
		for _, svc := range svcs {
			err := svc.Close()
			if err != nil {
				log.Printf("failed to tear down service %q: %s", svc.Container.Name, err)
			}
		}
	}

	if s.mockOIDC.r != nil {
		if err := s.mockOIDC.r.Close(); err != nil {
			log.Printf("failed to tear down oidc server: %s", err)
		}
	}

	for _, network := range s.networks {
		if err := network.Close(); err != nil {
			log.Printf("failed to tear down network: %s", err)
		}
	}
}

// Shutdown shuts down and cleans up all the containers (ControlServer, TailscaleClient)
// and networks associated with it.
// In addition, it will save the logs of the ControlServer to `/tmp/control` in the
// environment running the tests.
func (s *Scenario) Shutdown() {
	s.ShutdownAssertNoPanics(nil)
}
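
// Typically a test defers ShutdownAssertNoPanics so that a panic found in any
// container log fails the test; Shutdown is the variant for callers without a
// *testing.T. An illustrative sketch:
//
//	defer scenario.ShutdownAssertNoPanics(t)
//	// or, outside a test:
//	defer scenario.Shutdown()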

// Users returns the names of all users associated with the Scenario.
func (s *Scenario) Users() []string {
	users := make([]string, 0)
	for user := range s.users {
		users = append(users, user)
	}

	return users
}

/// Headscale related stuff
// Note: These functions assume that there is a _single_ headscale instance for now

// Headscale returns a ControlServer instance based on hsic (HeadscaleInContainer).
// If the Scenario already has an instance, the pointer to the running container
// will be returned, otherwise a new instance will be created.
// TODO(kradalby): make port and headscale configurable, multiple instances support?
func (s *Scenario) Headscale(opts ...hsic.Option) (ControlServer, error) {
	s.mu.Lock()
	defer s.mu.Unlock()

	if headscale, ok := s.controlServers.Load("headscale"); ok {
		return headscale, nil
	}

	if usePostgresForTest {
		opts = append(opts, hsic.WithPostgres())
	}

	headscale, err := hsic.New(s.pool, s.Networks(), opts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create headscale container: %w", err)
	}

	err = headscale.WaitForRunning()
	if err != nil {
		return nil, fmt.Errorf("failed to reach headscale container: %w", err)
	}

	s.controlServers.Store("headscale", headscale)

	return headscale, nil
}
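
// A sketch of typical usage; the hsic option shown is assumed to match what
// other tests in the suite pass, and is illustrative only:
//
//	headscale, err := scenario.Headscale(
//		hsic.WithTestName("example"),
//	)
//	if err != nil {
//		t.Fatal(err)
//	}
//	// Subsequent calls return the same running instance, options ignored.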

// Pool returns the dockertest pool for the scenario.
func (s *Scenario) Pool() *dockertest.Pool {
	return s.pool
}

// GetOrCreateUser gets or creates a user in the scenario.
func (s *Scenario) GetOrCreateUser(userStr string) *User {
	s.mu.Lock()
	defer s.mu.Unlock()

	if user, ok := s.users[userStr]; ok {
		return user
	}

	user := &User{
		Clients: make(map[string]TailscaleClient),
	}
	s.users[userStr] = user

	return user
}

// CreatePreAuthKey creates a "pre authorised key" in the
// Headscale instance on behalf of the Scenario.
func (s *Scenario) CreatePreAuthKey(
	user uint64,
	reusable bool,
	ephemeral bool,
) (*v1.PreAuthKey, error) {
	if headscale, err := s.Headscale(); err == nil {
		key, err := headscale.CreateAuthKey(user, reusable, ephemeral)
		if err != nil {
			return nil, fmt.Errorf("failed to create preauth key: %w", err)
		}

		return key, nil
	}

	return nil, fmt.Errorf("failed to create preauth key: %w", errNoHeadscaleAvailable)
}
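
// A sketch of the usual key-based login flow, assuming a user has already
// been created and has nodes attached (illustrative values):
//
//	key, err := scenario.CreatePreAuthKey(u.GetId(), true, false)
//	if err != nil {
//		t.Fatal(err)
//	}
//	err = scenario.RunTailscaleUp("user1", headscale.GetEndpoint(), key.GetKey())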

// CreatePreAuthKeyWithTags creates a "pre authorised key" with the specified tags
// in the Headscale instance on behalf of the Scenario.
func (s *Scenario) CreatePreAuthKeyWithTags(
	user uint64,
	reusable bool,
	ephemeral bool,
	tags []string,
) (*v1.PreAuthKey, error) {
	headscale, err := s.Headscale()
	if err != nil {
		return nil, fmt.Errorf("failed to create preauth key with tags: %w", errNoHeadscaleAvailable)
	}

	key, err := headscale.CreateAuthKeyWithTags(user, reusable, ephemeral, tags)
	if err != nil {
		return nil, fmt.Errorf("failed to create preauth key with tags: %w", err)
	}

	return key, nil
}
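
// Tags use the "tag:" prefix, so a call might look like the following
// (illustrative values):
//
//	key, err := scenario.CreatePreAuthKeyWithTags(u.GetId(), true, false, []string{"tag:test"})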

// CreateUser creates a User in the
// Headscale instance on behalf of the Scenario.
func (s *Scenario) CreateUser(user string) (*v1.User, error) {
	if headscale, err := s.Headscale(); err == nil {
		u, err := headscale.CreateUser(user)
		if err != nil {
			return nil, fmt.Errorf("failed to create user: %w", err)
		}

		s.mu.Lock()
		s.users[user] = &User{
			Clients: make(map[string]TailscaleClient),
		}
		s.mu.Unlock()

		return u, nil
	}

	return nil, fmt.Errorf("failed to create user: %w", errNoHeadscaleAvailable)
}

/// Client related stuff

func (s *Scenario) CreateTailscaleNode(
	version string,
	opts ...tsic.Option,
) (TailscaleClient, error) {
	headscale, err := s.Headscale()
	if err != nil {
		return nil, fmt.Errorf("failed to create tailscale node (version: %s): %w", version, err)
	}

	cert := headscale.GetCert()
	hostname := headscale.GetHostname()

	s.mu.Lock()
	defer s.mu.Unlock()
	opts = append(opts,
		tsic.WithCACert(cert),
		tsic.WithHeadscaleName(hostname),
	)

	tsClient, err := tsic.New(
		s.pool,
		version,
		opts...,
	)
	if err != nil {
		return nil, fmt.Errorf(
			"failed to create tailscale node: %w",
			err,
		)
	}

	err = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
	if err != nil {
		return nil, fmt.Errorf(
			"failed to wait for tailscaled (%s) to need login: %w",
			tsClient.Hostname(),
			err,
		)
	}

	return tsClient, nil
}

// CreateTailscaleNodesInUser creates and adds new TailscaleClients to a
// User in the Scenario.
func (s *Scenario) CreateTailscaleNodesInUser(
	userStr string,
	requestedVersion string,
	count int,
	opts ...tsic.Option,
) error {
	if user, ok := s.users[userStr]; ok {
		var versions []string
		for i := range count {
			version := requestedVersion
			if requestedVersion == "all" {
				if s.spec.Versions != nil {
					version = s.spec.Versions[i%len(s.spec.Versions)]
				} else {
					version = MustTestVersions[i%len(MustTestVersions)]
				}
			}
			versions = append(versions, version)

			headscale, err := s.Headscale()
			if err != nil {
				return fmt.Errorf("failed to create tailscale node (version: %s): %w", version, err)
			}

			cert := headscale.GetCert()
			hostname := headscale.GetHostname()

			// Determine which network this tailscale client will be in
			var network *dockertest.Network
			if s.userToNetwork != nil && s.userToNetwork[userStr] != nil {
				network = s.userToNetwork[userStr]
			} else {
				network = s.networks[s.testDefaultNetwork]
			}

			// Get headscale IP in this network for /etc/hosts fallback DNS
			headscaleIP := headscale.GetIPInNetwork(network)
			extraHosts := []string{hostname + ":" + headscaleIP}

			s.mu.Lock()
			opts = append(opts,
				tsic.WithCACert(cert),
				tsic.WithHeadscaleName(hostname),
				tsic.WithExtraHosts(extraHosts),
			)

			s.mu.Unlock()

			user.createWaitGroup.Go(func() error {
				s.mu.Lock()
				tsClient, err := tsic.New(
					s.pool,
					version,
					opts...,
				)
				s.mu.Unlock()
				if err != nil {
					return fmt.Errorf(
						"failed to create tailscale node: %w",
						err,
					)
				}

				err = tsClient.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
				if err != nil {
					return fmt.Errorf(
						"failed to wait for tailscaled (%s) to need login: %w",
						tsClient.Hostname(),
						err,
					)
				}

				s.mu.Lock()
				user.Clients[tsClient.Hostname()] = tsClient
				s.mu.Unlock()

				return nil
			})
		}
		if err := user.createWaitGroup.Wait(); err != nil {
			return err
		}

		log.Printf("testing versions %v, MustTestVersions %v", lo.Uniq(versions), MustTestVersions)

		return nil
	}

	return fmt.Errorf("failed to add tailscale node: %w", errNoUserAvailable)
}
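
// A sketch of a direct call; passing "all" as the requested version cycles
// through s.spec.Versions (or MustTestVersions) as described above
// (illustrative values):
//
//	err := scenario.CreateTailscaleNodesInUser("user1", "all", 3)
//	if err != nil {
//		t.Fatal(err)
//	}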

// RunTailscaleUp will log in all of the TailscaleClients associated with a
// User to the given ControlServer (by URL).
func (s *Scenario) RunTailscaleUp(
	userStr, loginServer, authKey string,
) error {
	if user, ok := s.users[userStr]; ok {
		for _, client := range user.Clients {
			c := client
			user.joinWaitGroup.Go(func() error {
				return c.Login(loginServer, authKey)
			})
		}

		if err := user.joinWaitGroup.Wait(); err != nil {
			return err
		}

		for _, client := range user.Clients {
			err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
			if err != nil {
				return fmt.Errorf("%s failed to up tailscale node: %w", client.Hostname(), err)
			}
		}

		return nil
	}

	return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
}

// CountTailscale returns the total number of TailscaleClients in a Scenario.
// This is the sum of Users x TailscaleClients.
func (s *Scenario) CountTailscale() int {
	count := 0

	for _, user := range s.users {
		count += len(user.Clients)
	}

	return count
}

// WaitForTailscaleSync blocks execution until every TailscaleClient reports
// all other TailscaleClients to be present in its netmap.NetworkMap.
func (s *Scenario) WaitForTailscaleSync() error {
	tsCount := s.CountTailscale()

	err := s.WaitForTailscaleSyncWithPeerCount(tsCount-1, integrationutil.PeerSyncTimeout(), integrationutil.PeerSyncRetryInterval())
	if err != nil {
		for _, user := range s.users {
			for _, client := range user.Clients {
				peers, allOnline, _ := client.FailingPeersAsString()
				if !allOnline {
					log.Println(peers)
				}
			}
		}
	}

	return err
}

// WaitForTailscaleSyncPerUser blocks execution until each TailscaleClient has the expected
// number of peers for its user. This is useful for policies like autogroup:self where nodes
// only see same-user peers, not all nodes in the network.
func (s *Scenario) WaitForTailscaleSyncPerUser(timeout, retryInterval time.Duration) error {
	var allErrors []error

	for _, user := range s.users {
		// Calculate expected peer count: number of nodes in this user minus 1 (self)
		expectedPeers := len(user.Clients) - 1

		for _, client := range user.Clients {
			c := client
			expectedCount := expectedPeers
			user.syncWaitGroup.Go(func() error {
				return c.WaitForPeers(expectedCount, timeout, retryInterval)
			})
		}
		if err := user.syncWaitGroup.Wait(); err != nil {
			allErrors = append(allErrors, err)
		}
	}

	if len(allErrors) > 0 {
		return multierr.New(allErrors...)
	}

	return nil
}
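
// For example, with two users of three nodes each under an autogroup:self
// policy, each node only expects len(user.Clients)-1 == 2 peers, so a test
// would call this instead of WaitForTailscaleSync:
//
//	err := scenario.WaitForTailscaleSyncPerUser(
//		integrationutil.PeerSyncTimeout(),
//		integrationutil.PeerSyncRetryInterval(),
//	)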

// WaitForTailscaleSyncWithPeerCount blocks execution until every TailscaleClient
// reports the given number of peers present in its netmap.NetworkMap.
func (s *Scenario) WaitForTailscaleSyncWithPeerCount(peerCount int, timeout, retryInterval time.Duration) error {
	var allErrors []error

	for _, user := range s.users {
		for _, client := range user.Clients {
			c := client
			user.syncWaitGroup.Go(func() error {
				return c.WaitForPeers(peerCount, timeout, retryInterval)
			})
		}
		if err := user.syncWaitGroup.Wait(); err != nil {
			allErrors = append(allErrors, err)
		}
	}

	if len(allErrors) > 0 {
		return multierr.New(allErrors...)
	}

	return nil
}

func (s *Scenario) CreateHeadscaleEnvWithLoginURL(
	tsOpts []tsic.Option,
	opts ...hsic.Option,
) error {
	return s.createHeadscaleEnv(true, tsOpts, opts...)
}

func (s *Scenario) CreateHeadscaleEnv(
	tsOpts []tsic.Option,
	opts ...hsic.Option,
) error {
	return s.createHeadscaleEnv(false, tsOpts, opts...)
}

// createHeadscaleEnv starts the headscale environment and the clients
// according to the ScenarioSpec passed to the Scenario.
func (s *Scenario) createHeadscaleEnv(
	withURL bool,
	tsOpts []tsic.Option,
	opts ...hsic.Option,
) error {
	return s.createHeadscaleEnvWithTags(withURL, tsOpts, nil, "", opts...)
}

// createHeadscaleEnvWithTags starts the headscale environment and the clients
// according to the ScenarioSpec passed to the Scenario. If preAuthKeyTags is
// non-empty and withURL is false, the tags will be applied to the PreAuthKey
// (tags-as-identity model).
//
// For webauth (withURL=true), if webauthTagUser is non-empty and preAuthKeyTags
// is non-empty, only nodes belonging to that user will request tags via
// --advertise-tags. This is necessary because the tagOwners ACL controls which
// users can request specific tags.
func (s *Scenario) createHeadscaleEnvWithTags(
	withURL bool,
	tsOpts []tsic.Option,
	preAuthKeyTags []string,
	webauthTagUser string,
	opts ...hsic.Option,
) error {
	headscale, err := s.Headscale(opts...)
	if err != nil {
		return err
	}

	for _, user := range s.spec.Users {
		u, err := s.CreateUser(user)
		if err != nil {
			return err
		}

		var userOpts []tsic.Option
		if s.userToNetwork != nil {
			userOpts = append(tsOpts, tsic.WithNetwork(s.userToNetwork[user]))
		} else {
			userOpts = append(tsOpts, tsic.WithNetwork(s.networks[s.testDefaultNetwork]))
		}

		// For webauth with tags, only apply tags to the specified webauthTagUser
		// (other users may not be authorized via tagOwners)
		if withURL && webauthTagUser != "" && len(preAuthKeyTags) > 0 && user == webauthTagUser {
			userOpts = append(userOpts, tsic.WithTags(preAuthKeyTags))
		}

		err = s.CreateTailscaleNodesInUser(user, "all", s.spec.NodesPerUser, userOpts...)
		if err != nil {
			return err
		}

		if withURL {
			err = s.RunTailscaleUpWithURL(user, headscale.GetEndpoint())
			if err != nil {
				return err
			}
		} else {
			// Use a tagged PreAuthKey if tags are provided (tags-as-identity model)
			var key *v1.PreAuthKey
			if len(preAuthKeyTags) > 0 {
				key, err = s.CreatePreAuthKeyWithTags(u.GetId(), true, false, preAuthKeyTags)
			} else {
				key, err = s.CreatePreAuthKey(u.GetId(), true, false)
			}
			if err != nil {
				return err
			}

			err = s.RunTailscaleUp(user, headscale.GetEndpoint(), key.GetKey())
			if err != nil {
				return err
			}
		}
	}

	return nil
}
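
// A sketch of how the exported wrappers are typically driven from a test;
// the option values are illustrative and the hsic option is assumed to match
// what other tests pass:
//
//	err := scenario.CreateHeadscaleEnv(
//		[]tsic.Option{},
//		hsic.WithTestName("example"),
//	)
//	if err != nil {
//		t.Fatal(err)
//	}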

func (s *Scenario) RunTailscaleUpWithURL(userStr, loginServer string) error {
	log.Printf("running tailscale up for user %s", userStr)
	if user, ok := s.users[userStr]; ok {
		for _, client := range user.Clients {
			tsc := client
			user.joinWaitGroup.Go(func() error {
				loginURL, err := tsc.LoginWithURL(loginServer)
				if err != nil {
					log.Printf("%s failed to run tailscale up: %s", tsc.Hostname(), err)
				}

				body, err := doLoginURL(tsc.Hostname(), loginURL)
				if err != nil {
					return err
				}

				// If the URL is not an OIDC URL, then we need to
				// run the register command to fully log in the client.
				if !strings.Contains(loginURL.String(), "/oidc/") {
					s.runHeadscaleRegister(userStr, body)
				}

				return nil
			})

			log.Printf("client %s is ready", client.Hostname())
		}

		if err := user.joinWaitGroup.Wait(); err != nil {
			return err
		}

		for _, client := range user.Clients {
			err := client.WaitForRunning(integrationutil.PeerSyncTimeout())
			if err != nil {
				return fmt.Errorf(
					"%s tailscale node has not reached running: %w",
					client.Hostname(),
					err,
				)
			}
		}

		return nil
	}

	return fmt.Errorf("failed to up tailscale node: %w", errNoUserAvailable)
}

type debugJar struct {
	inner *cookiejar.Jar
	mu    sync.RWMutex
	store map[string]map[string]map[string]*http.Cookie // domain -> path -> name -> cookie
}

func newDebugJar() (*debugJar, error) {
	jar, err := cookiejar.New(nil)
	if err != nil {
		return nil, err
	}
	return &debugJar{
		inner: jar,
		store: make(map[string]map[string]map[string]*http.Cookie),
	}, nil
}

func (j *debugJar) SetCookies(u *url.URL, cookies []*http.Cookie) {
	j.inner.SetCookies(u, cookies)

	j.mu.Lock()
	defer j.mu.Unlock()

	for _, c := range cookies {
		if c == nil || c.Name == "" {
			continue
		}
		domain := c.Domain
		if domain == "" {
			domain = u.Hostname()
		}
		path := c.Path
		if path == "" {
			path = "/"
		}
		if _, ok := j.store[domain]; !ok {
			j.store[domain] = make(map[string]map[string]*http.Cookie)
		}
		if _, ok := j.store[domain][path]; !ok {
			j.store[domain][path] = make(map[string]*http.Cookie)
		}
		j.store[domain][path][c.Name] = copyCookie(c)
	}
}

func (j *debugJar) Cookies(u *url.URL) []*http.Cookie {
	return j.inner.Cookies(u)
}

func (j *debugJar) Dump(w io.Writer) {
	j.mu.RLock()
	defer j.mu.RUnlock()

	for domain, paths := range j.store {
		fmt.Fprintf(w, "Domain: %s\n", domain)
		for path, byName := range paths {
			fmt.Fprintf(w, " Path: %s\n", path)
			for _, c := range byName {
				fmt.Fprintf(
					w, " %s=%s; Expires=%v; Secure=%v; HttpOnly=%v; SameSite=%v\n",
					c.Name, c.Value, c.Expires, c.Secure, c.HttpOnly, c.SameSite,
				)
			}
		}
	}
}

func copyCookie(c *http.Cookie) *http.Cookie {
	cc := *c
	return &cc
}

func newLoginHTTPClient(hostname string) (*http.Client, error) {
	hc := &http.Client{
		Transport: LoggingRoundTripper{Hostname: hostname},
	}

	jar, err := newDebugJar()
	if err != nil {
		return nil, fmt.Errorf("%s failed to create cookiejar: %w", hostname, err)
	}

	hc.Jar = jar

	return hc, nil
}

// doLoginURL visits the given login URL and returns the body as a string.
func doLoginURL(hostname string, loginURL *url.URL) (string, error) {
	log.Printf("%s login url: %s\n", hostname, loginURL.String())

	hc, err := newLoginHTTPClient(hostname)
	if err != nil {
		return "", err
	}

	body, _, err := doLoginURLWithClient(hostname, loginURL, hc, true)
	if err != nil {
		return "", err
	}

	return body, nil
}
|
|
|
|
|
|
|
|
|
|
// doLoginURLWithClient performs the login request using the provided HTTP client.
|
|
|
|
|
// When followRedirects is false, it will return the first redirect without following it.
|
|
|
|
|
func doLoginURLWithClient(hostname string, loginURL *url.URL, hc *http.Client, followRedirects bool) (
|
|
|
|
|
string,
|
|
|
|
|
*url.URL,
|
|
|
|
|
error,
|
|
|
|
|
) {
|
|
|
|
|
if hc == nil {
|
|
|
|
|
return "", nil, fmt.Errorf("%s http client is nil", hostname)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if loginURL == nil {
|
|
|
|
|
return "", nil, fmt.Errorf("%s login url is nil", hostname)
|
2025-03-21 11:49:32 +01:00
|
|
|
}
|
|
|
|
|
|
2025-10-16 12:17:43 +02:00
|
|
|
log.Printf("%s logging in with url: %s", hostname, loginURL.String())
|
2025-03-21 11:49:32 +01:00
|
|
|
ctx := context.Background()
|
2025-11-04 07:18:51 +02:00
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, loginURL.String(), nil)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", nil, fmt.Errorf("%s failed to create http request: %w", hostname, err)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
originalRedirect := hc.CheckRedirect
|
|
|
|
|
if !followRedirects {
|
|
|
|
|
hc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
|
|
|
|
return http.ErrUseLastResponse
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
defer func() {
|
|
|
|
|
hc.CheckRedirect = originalRedirect
|
|
|
|
|
}()
|
|
|
|
|
|
2025-03-21 11:49:32 +01:00
|
|
|
resp, err := hc.Do(req)
|
|
|
|
|
if err != nil {
|
2025-11-04 07:18:51 +02:00
|
|
|
return "", nil, fmt.Errorf("%s failed to send http request: %w", hostname, err)
|
|
|
|
|
}
|
|
|
|
|
defer resp.Body.Close()
|
|
|
|
|
|
|
|
|
|
bodyBytes, err := io.ReadAll(resp.Body)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", nil, fmt.Errorf("%s failed to read response body: %w", hostname, err)
|
2025-03-21 11:49:32 +01:00
|
|
|
}
|
2025-11-04 07:18:51 +02:00
|
|
|
body := string(bodyBytes)
|
2025-03-21 11:49:32 +01:00
|
|
|
|
2025-11-04 07:18:51 +02:00
|
|
|
var redirectURL *url.URL
|
|
|
|
|
if resp.StatusCode >= http.StatusMultipleChoices && resp.StatusCode < http.StatusBadRequest {
|
|
|
|
|
redirectURL, err = resp.Location()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return body, nil, fmt.Errorf("%s failed to resolve redirect location: %w", hostname, err)
|
|
|
|
|
}
|
|
|
|
|
}
	if followRedirects && resp.StatusCode != http.StatusOK {
		log.Printf("body: %s", body)

		return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
	}

	if resp.StatusCode >= http.StatusBadRequest {
		log.Printf("body: %s", body)

		return body, redirectURL, fmt.Errorf("%s unexpected status code %d", hostname, resp.StatusCode)
	}

	if hc.Jar != nil {
		if jar, ok := hc.Jar.(*debugJar); ok {
			jar.Dump(os.Stdout)
		} else {
			log.Printf("cookies: %+v", hc.Jar.Cookies(loginURL))
		}
	}

	return body, redirectURL, nil
}
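
// A sketch of the non-following mode, useful for stepping through an OIDC
// redirect chain one hop at a time (variables are illustrative):
//
//	hc, _ := newLoginHTTPClient(hostname)
//	body, next, err := doLoginURLWithClient(hostname, loginURL, hc, false)
//	// next holds the first 3xx Location, if any, without it being followed.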

var errParseAuthPage = errors.New("failed to parse auth page")

func (s *Scenario) runHeadscaleRegister(userStr string, body string) error {
	// Extract the machine key from the registration page; see the HTML
	// template in api.go.
	codeSep := strings.Split(body, "</code>")
	if len(codeSep) != 2 {
		return errParseAuthPage
	}

	keySep := strings.Split(codeSep[0], "key ")
	if len(keySep) != 2 {
		return errParseAuthPage
	}
	key := keySep[1]
	key = strings.SplitN(key, " ", 2)[0]

	log.Printf("registering node %s", key)

	if headscale, err := s.Headscale(); err == nil {
		_, err = headscale.Execute(
			[]string{"headscale", "nodes", "register", "--user", userStr, "--key", key},
		)
		if err != nil {
			log.Printf("failed to register node: %s", err)

			return err
		}

		return nil
	}

	return fmt.Errorf("failed to find headscale: %w", errNoHeadscaleAvailable)
}
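
// The parser above assumes the page contains exactly one </code> tag and that
// the machine key follows the literal "key " inside it, roughly (illustrative,
// not the exact template):
//
//	<code>headscale nodes register --user USERNAME --key mkey:abc123</code>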

// LoggingRoundTripper is an http.RoundTripper that logs every request and
// response; it always uses an insecure (certificate-verification disabled)
// transport, which is acceptable for these throwaway test containers.
type LoggingRoundTripper struct {
	Hostname string
}

func (t LoggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	noTLS := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint
	}
	resp, err := noTLS.RoundTrip(req)
	if err != nil {
		return nil, err
	}

	log.Printf(`
---
%s - method: %s | url: %s
%s - status: %d | cookies: %+v
---
`, t.Hostname, req.Method, req.URL.String(), t.Hostname, resp.StatusCode, resp.Cookies())

	return resp, nil
}
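
// A minimal sketch of wiring the logging transport into a client when
// debugging login flows (the hostname label is illustrative):
//
//	hc := &http.Client{Transport: LoggingRoundTripper{Hostname: "ts-client-1"}}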

// GetIPs returns all netip.Addr of TailscaleClients associated with a User
// in a Scenario.
func (s *Scenario) GetIPs(user string) ([]netip.Addr, error) {
	var ips []netip.Addr
	if ns, ok := s.users[user]; ok {
		for _, client := range ns.Clients {
			clientIps, err := client.IPs()
			if err != nil {
				return ips, fmt.Errorf("failed to get ips: %w", err)
			}
			ips = append(ips, clientIps...)
		}

		return ips, nil
	}

	return ips, fmt.Errorf("failed to get ips: %w", errNoUserAvailable)
}

// GetClients returns all TailscaleClients associated with a User in a Scenario.
func (s *Scenario) GetClients(user string) ([]TailscaleClient, error) {
	var clients []TailscaleClient
	if ns, ok := s.users[user]; ok {
		for _, client := range ns.Clients {
			clients = append(clients, client)
		}

		return clients, nil
	}

	return clients, fmt.Errorf("failed to get clients: %w", errNoUserAvailable)
}

// ListTailscaleClients returns a list of TailscaleClients given the Users
// passed as parameters.
func (s *Scenario) ListTailscaleClients(users ...string) ([]TailscaleClient, error) {
	var allClients []TailscaleClient

	if len(users) == 0 {
		users = s.Users()
	}

	for _, user := range users {
		clients, err := s.GetClients(user)
		if err != nil {
			return nil, err
		}

		allClients = append(allClients, clients...)
	}

	return allClients, nil
}
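
// Usage sketch (user names are illustrative):
//
//	all, _ := s.ListTailscaleClients()         // every client in the scenario
//	one, _ := s.ListTailscaleClients("user1")  // only user1's clients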

// FindTailscaleClientByIP returns a TailscaleClient associated with an IP address
// if it exists.
func (s *Scenario) FindTailscaleClientByIP(ip netip.Addr) (TailscaleClient, error) {
	clients, err := s.ListTailscaleClients()
	if err != nil {
		return nil, err
	}

	for _, client := range clients {
		ips, _ := client.IPs()
		if slices.Contains(ips, ip) {
			return client, nil
		}
	}

	return nil, errNoClientFound
}

// ListTailscaleClientsIPs returns a list of netip.Addr based on Users
// passed as parameters.
func (s *Scenario) ListTailscaleClientsIPs(users ...string) ([]netip.Addr, error) {
	var allIps []netip.Addr

	if len(users) == 0 {
		users = s.Users()
	}

	for _, user := range users {
		ips, err := s.GetIPs(user)
		if err != nil {
			return nil, err
		}

		allIps = append(allIps, ips...)
	}

	return allIps, nil
}

// ListTailscaleClientsFQDNs returns a list of FQDNs based on Users
// passed as parameters.
func (s *Scenario) ListTailscaleClientsFQDNs(users ...string) ([]string, error) {
	allFQDNs := make([]string, 0)

	clients, err := s.ListTailscaleClients(users...)
	if err != nil {
		return nil, err
	}

	for _, client := range clients {
		fqdn, err := client.FQDN()
		if err != nil {
			return nil, err
		}

		allFQDNs = append(allFQDNs, fqdn)
	}

	return allFQDNs, nil
}
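
// Usage sketch for the addressing helpers (the user name is illustrative):
//
//	ips, _ := s.ListTailscaleClientsIPs("user1")
//	fqdns, _ := s.ListTailscaleClientsFQDNs("user1")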

// WaitForTailscaleLogout blocks execution until all TailscaleClients have
// logged out of the ControlServer.
func (s *Scenario) WaitForTailscaleLogout() error {
	for _, user := range s.users {
		for _, client := range user.Clients {
			c := client // capture the loop variable for the goroutine (pre-Go 1.22 idiom)
			user.syncWaitGroup.Go(func() error {
				return c.WaitForNeedsLogin(integrationutil.PeerSyncTimeout())
			})
		}
		if err := user.syncWaitGroup.Wait(); err != nil {
			return err
		}
	}

	return nil
}

// CreateDERPServer creates a new DERP server in a container.
func (s *Scenario) CreateDERPServer(version string, opts ...dsic.Option) (*dsic.DERPServerInContainer, error) {
	derp, err := dsic.New(s.pool, version, s.Networks(), opts...)
	if err != nil {
		return nil, fmt.Errorf("failed to create DERP server: %w", err)
	}

	err = derp.WaitForRunning()
	if err != nil {
		return nil, fmt.Errorf("failed to reach DERP server: %w", err)
	}

	s.derpServers = append(s.derpServers, derp)

	return derp, nil
}
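
// Usage sketch; the version string is illustrative and options are omitted:
//
//	derp, err := s.CreateDERPServer("head")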

type scenarioOIDC struct {
	r   *dockertest.Resource
	cfg *types.OIDCConfig
}

func (o *scenarioOIDC) Issuer() string {
	if o.cfg == nil {
		panic("OIDC has not been created")
	}

	return o.cfg.Issuer
}

func (o *scenarioOIDC) ClientSecret() string {
	if o.cfg == nil {
		panic("OIDC has not been created")
	}

	return o.cfg.ClientSecret
}

func (o *scenarioOIDC) ClientID() string {
	if o.cfg == nil {
		panic("OIDC has not been created")
	}

	return o.cfg.ClientID
}

const (
	dockerContextPath      = "../."
	hsicOIDCMockHashLength = 6
	defaultAccessTTL       = 10 * time.Minute
)

var errStatusCodeNotOK = errors.New("status code not OK")

// runMockOIDC starts a mock OIDC provider in a container and stores its
// resulting configuration on the Scenario.
func (s *Scenario) runMockOIDC(accessTTL time.Duration, users []mockoidc.MockUser) error {
	port, err := dockertestutil.RandomFreeHostPort()
	if err != nil {
		log.Fatalf("could not find an open port: %s", err)
	}
	portNotation := fmt.Sprintf("%d/tcp", port)

	hash, _ := util.GenerateRandomStringDNSSafe(hsicOIDCMockHashLength)

	hostname := "hs-oidcmock-" + hash

	usersJSON, err := json.Marshal(users)
	if err != nil {
		return err
	}

	mockOidcOptions := &dockertest.RunOptions{
		Name:         hostname,
		Cmd:          []string{"headscale", "mockoidc"},
		ExposedPorts: []string{portNotation},
		PortBindings: map[docker.Port][]docker.PortBinding{
			docker.Port(portNotation): {{HostPort: strconv.Itoa(port)}},
		},
		Networks: s.Networks(),
		Env: []string{
			"MOCKOIDC_ADDR=" + hostname,
			fmt.Sprintf("MOCKOIDC_PORT=%d", port),
			"MOCKOIDC_CLIENT_ID=superclient",
			"MOCKOIDC_CLIENT_SECRET=supersecret",
			"MOCKOIDC_ACCESS_TTL=" + accessTTL.String(),
			"MOCKOIDC_USERS=" + string(usersJSON),
		},
	}

	headscaleBuildOptions := &dockertest.BuildOptions{
		Dockerfile: hsic.IntegrationTestDockerFileName,
		ContextDir: dockerContextPath,
	}

	err = s.pool.RemoveContainerByName(hostname)
	if err != nil {
		return err
	}

	s.mockOIDC = scenarioOIDC{}

	// Add integration test labels if running under hi tool
	dockertestutil.DockerAddIntegrationLabels(mockOidcOptions, "oidc")

	if pmockoidc, err := s.pool.BuildAndRunWithBuildOptions(
		headscaleBuildOptions,
		mockOidcOptions,
		dockertestutil.DockerRestartPolicy); err == nil {
		s.mockOIDC.r = pmockoidc
	} else {
		return err
	}

	// headscale needs to set up the provider with a specific
	// IP addr to ensure we get the correct config from the well-known
	// endpoint.
	network := s.Networks()[0]
	ipAddr := s.mockOIDC.r.GetIPInNetwork(network)

	log.Println("Waiting for headscale mock oidc to be ready for tests")
	hostEndpoint := net.JoinHostPort(ipAddr, strconv.Itoa(port))

	if err := s.pool.Retry(func() error {
		oidcConfigURL := fmt.Sprintf("http://%s/oidc/.well-known/openid-configuration", hostEndpoint)
		httpClient := &http.Client{}
		ctx := context.Background()
		req, _ := http.NewRequestWithContext(ctx, http.MethodGet, oidcConfigURL, nil)
		resp, err := httpClient.Do(req)
		if err != nil {
			log.Printf("headscale mock OIDC is not ready: %s\n", err)

			return err
		}
		defer resp.Body.Close()

		if resp.StatusCode != http.StatusOK {
			return errStatusCodeNotOK
		}

		return nil
	}); err != nil {
		return err
	}

	s.mockOIDC.cfg = &types.OIDCConfig{
		Issuer: fmt.Sprintf(
			"http://%s/oidc",
			hostEndpoint,
		),
		ClientID:                   "superclient",
		ClientSecret:               "supersecret",
		OnlyStartIfOIDCIsAvailable: true,
	}

	log.Printf("headscale mock oidc is ready for tests at %s", hostEndpoint)

	return nil
}
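
// Usage sketch, assuming mockoidc.MockUser values built inline (the field set
// shown is abbreviated):
//
//	err := s.runMockOIDC(defaultAccessTTL, []mockoidc.MockUser{
//		{Subject: "user1", Email: "user1@example.com", EmailVerified: true},
//	})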

type extraServiceFunc func(*Scenario, string) (*dockertest.Resource, error)
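
// Webservice below satisfies this signature.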

// Webservice starts a plain HTTP file server in a container attached to the
// given scenario network, serving the container filesystem on port 80.
func Webservice(s *Scenario, networkName string) (*dockertest.Resource, error) {
	hash := util.MustGenerateRandomStringDNSSafe(hsicOIDCMockHashLength)

	hostname := "hs-webservice-" + hash

	network, ok := s.networks[s.prefixedNetworkName(networkName)]
	if !ok {
		return nil, fmt.Errorf("network does not exist: %s", networkName)
	}

	webOpts := &dockertest.RunOptions{
		Name: hostname,
		Cmd:  []string{"/bin/sh", "-c", "cd / ; python3 -m http.server --bind :: 80"},
		// No host port bindings: the server is only reachable from other
		// containers on the Docker network it is attached to.
		Networks: []*dockertest.Network{network},
		Env:      []string{},
	}

	// Add integration test labels if running under hi tool
	dockertestutil.DockerAddIntegrationLabels(webOpts, "web")

	webBOpts := &dockertest.BuildOptions{
		Dockerfile: hsic.IntegrationTestDockerFileName,
		ContextDir: dockerContextPath,
	}

	web, err := s.pool.BuildAndRunWithBuildOptions(
		webBOpts,
		webOpts,
		dockertestutil.DockerRestartPolicy)
	if err != nil {
		return nil, err
	}

	return web, nil
}