fix gosum merge
commit a806694d23
@ -3,6 +3,7 @@
|
||||
// development
|
||||
integration_test.go
|
||||
integration_test/
|
||||
!integration_test/etc_embedded_derp/tls/server.crt
|
||||
|
||||
Dockerfile*
|
||||
docker-compose*
|
||||
|
@ -19,6 +19,7 @@
|
||||
- Users can now use emails in ACL groups [#372](https://github.com/juanfont/headscale/issues/372)
|
||||
- Add shorthand aliases for commands and subcommands [#376](https://github.com/juanfont/headscale/pull/376)
|
||||
- Add `/windows` endpoint for Windows configuration instructions + registry file download [#392](https://github.com/juanfont/headscale/pull/392)
|
||||
- Add embedded DERP server to Headscale [#388](https://github.com/juanfont/headscale/pull/388)
|
||||
|
||||
### Changes
|
||||
|
||||
|
@ -7,5 +7,10 @@ RUN apt-get update \
|
||||
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.gpg | apt-key add - \
|
||||
&& curl -fsSL https://pkgs.tailscale.com/stable/ubuntu/focal.list | tee /etc/apt/sources.list.d/tailscale.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y tailscale=${TAILSCALE_VERSION} dnsutils \
|
||||
&& apt-get install -y ca-certificates tailscale=${TAILSCALE_VERSION} dnsutils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ADD integration_test/etc_embedded_derp/tls/server.crt /usr/local/share/ca-certificates/
|
||||
RUN chmod 644 /usr/local/share/ca-certificates/server.crt
|
||||
|
||||
RUN update-ca-certificates
|
||||
|
3
Makefile
@ -23,6 +23,9 @@ test_integration:
|
||||
test_integration_cli:
|
||||
go test -tags integration -v integration_cli_test.go integration_common_test.go
|
||||
|
||||
test_integration_derp:
|
||||
go test -tags integration -v integration_embedded_derp_test.go integration_common_test.go
|
||||
|
||||
coverprofile_func:
|
||||
go tool cover -func=coverage.out
|
||||
|
||||
|
34
README.md
@ -63,6 +63,7 @@ one of the maintainers.
|
||||
- Dual stack (IPv4 and IPv6)
|
||||
- Route advertising (including exit nodes)
|
||||
- Ephemeral nodes
|
||||
- Embedded [DERP server](https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp)
|
||||
|
||||
## Client OS support
|
||||
|
||||
@ -194,6 +195,15 @@ make build
|
||||
<sub style="font-size:14px"><b>ohdearaugustin</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/e-zk>
|
||||
<img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>e-zk</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/arch4ngel>
|
||||
<img src=https://avatars.githubusercontent.com/u/11574161?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Justin Angel/>
|
||||
@ -201,8 +211,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Justin Angel</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ItalyPaleAle>
|
||||
<img src=https://avatars.githubusercontent.com/u/43508?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Alessandro (Ale) Segala/>
|
||||
@ -210,13 +218,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Alessandro (Ale) Segala</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/e-zk>
|
||||
<img src=https://avatars.githubusercontent.com/u/58356365?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=e-zk/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>e-zk</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/unreality>
|
||||
<img src=https://avatars.githubusercontent.com/u/352522?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=unreality/>
|
||||
@ -393,6 +394,13 @@ make build
|
||||
<sub style="font-size:14px"><b>rcursaru</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/renovate-bot>
|
||||
<img src=https://avatars.githubusercontent.com/u/25180681?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=WhiteSource Renovate/>
|
||||
<br />
|
||||
<sub style="font-size:14px"><b>WhiteSource Renovate</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ryanfowler>
|
||||
<img src=https://avatars.githubusercontent.com/u/2668821?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Ryan Fowler/>
|
||||
@ -414,6 +422,8 @@ make build
|
||||
<sub style="font-size:14px"><b>Tanner</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/Teteros>
|
||||
<img src=https://avatars.githubusercontent.com/u/5067989?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=Teteros/>
|
||||
@ -421,8 +431,6 @@ make build
|
||||
<sub style="font-size:14px"><b>Teteros</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/gitter-badger>
|
||||
<img src=https://avatars.githubusercontent.com/u/8518239?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=The Gitter Badger/>
|
||||
@ -458,6 +466,8 @@ make build
|
||||
<sub style="font-size:14px"><b>ZiYuan</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/derelm>
|
||||
<img src=https://avatars.githubusercontent.com/u/465155?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=derelm/>
|
||||
@ -465,8 +475,6 @@ make build
|
||||
<sub style="font-size:14px"><b>derelm</b></sub>
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center" style="word-wrap: break-word; width: 150.0; height: 150.0">
|
||||
<a href=https://github.com/ignoramous>
|
||||
<img src=https://avatars.githubusercontent.com/u/852289?v=4 width="100;" style="border-radius:50%;align-items:center;justify-content:center;overflow:hidden;padding-top:10px" alt=ignoramous/>
|
||||
|
27
api.go
@ -45,22 +45,21 @@ type registerWebAPITemplateConfig struct {
|
||||
}
|
||||
|
||||
var registerWebAPITemplate = template.Must(
|
||||
template.New("registerweb").Parse(`<html>
|
||||
template.New("registerweb").Parse(`
|
||||
<html>
|
||||
<head>
|
||||
<title>Registration - Headscale</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>headscale</h1>
|
||||
<p>
|
||||
Run the command below in the headscale server to add this machine to your network:
|
||||
</p>
|
||||
|
||||
<p>
|
||||
<code>
|
||||
<b>headscale -n NAMESPACE nodes register --key {{.Key}}</b>
|
||||
</code>
|
||||
</p>
|
||||
|
||||
<h1>headscale</h1>
|
||||
<h2>Machine registration</h2>
|
||||
<p>
|
||||
Run the command below in the headscale server to add this machine to your network:
|
||||
</p>
|
||||
<pre><code>headscale -n NAMESPACE nodes register --key {{.Key}}</code></pre>
|
||||
</body>
|
||||
</html>`),
|
||||
)
|
||||
</html>
|
||||
`))
|
||||
|
||||
// RegisterWebAPI shows a simple message in the browser to point to the CLI
|
||||
// Listens on /register.
|
||||
|
39
app.go
@ -120,10 +120,16 @@ type OIDCConfig struct {
|
||||
}
|
||||
|
||||
type DERPConfig struct {
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
ServerEnabled bool
|
||||
ServerRegionID int
|
||||
ServerRegionCode string
|
||||
ServerRegionName string
|
||||
STUNEnabled bool
|
||||
STUNAddr string
|
||||
URLs []url.URL
|
||||
Paths []string
|
||||
AutoUpdate bool
|
||||
UpdateFrequency time.Duration
|
||||
}
|
||||
|
||||
type CLIConfig struct {
|
||||
@ -142,7 +148,8 @@ type Headscale struct {
|
||||
dbDebug bool
|
||||
privateKey *key.MachinePrivate
|
||||
|
||||
DERPMap *tailcfg.DERPMap
|
||||
DERPMap *tailcfg.DERPMap
|
||||
DERPServer *DERPServer
|
||||
|
||||
aclPolicy *ACLPolicy
|
||||
aclRules []tailcfg.FilterRule
|
||||
@ -178,7 +185,6 @@ func LookupTLSClientAuthMode(mode string) (tls.ClientAuthType, bool) {
|
||||
}
|
||||
}
|
||||
|
||||
// NewHeadscale returns the Headscale app.
|
||||
func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
privKey, err := readOrCreatePrivateKey(cfg.PrivateKeyPath)
|
||||
if err != nil {
|
||||
@ -239,6 +245,14 @@ func NewHeadscale(cfg Config) (*Headscale, error) {
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.DERP.ServerEnabled {
|
||||
embeddedDERPServer, err := app.NewDERPServer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
app.DERPServer = embeddedDERPServer
|
||||
}
|
||||
|
||||
return &app, nil
|
||||
}
|
||||
|
||||
@ -463,6 +477,12 @@ func (h *Headscale) createRouter(grpcMux *runtime.ServeMux) *gin.Engine {
|
||||
router.GET("/swagger", SwaggerUI)
|
||||
router.GET("/swagger/v1/openapiv2.json", SwaggerAPIv1)
|
||||
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
router.Any("/derp", h.DERPHandler)
|
||||
router.Any("/derp/probe", h.DERPProbeHandler)
|
||||
router.Any("/bootstrap-dns", h.DERPBootstrapDNSHandler)
|
||||
}
|
||||
|
||||
api := router.Group("/api")
|
||||
api.Use(h.httpAuthenticationMiddleware)
|
||||
{
|
||||
@ -481,6 +501,13 @@ func (h *Headscale) Serve() error {
|
||||
// Fetch an initial DERP Map before we start serving
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
|
||||
if h.cfg.DERP.ServerEnabled {
|
||||
h.DERPMap.Regions[h.DERPServer.region.RegionID] = &h.DERPServer.region
|
||||
if h.cfg.DERP.STUNEnabled {
|
||||
go h.ServeSTUN()
|
||||
}
|
||||
}
|
||||
|
||||
if h.cfg.DERP.AutoUpdate {
|
||||
derpMapCancelChannel := make(chan struct{})
|
||||
defer func() { derpMapCancelChannel <- struct{}{} }()
|
||||
|
@ -1,8 +1,7 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@ -19,12 +18,12 @@ var serveCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
h, err := getHeadscaleApp()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
log.Fatal().Caller().Err(err).Msg("Error initializing")
|
||||
}
|
||||
|
||||
err = h.Serve()
|
||||
if err != nil {
|
||||
log.Fatalf("Error initializing: %s", err)
|
||||
log.Fatal().Caller().Err(err).Msg("Error starting server")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
@ -117,6 +117,13 @@ func LoadConfig(path string) error {
|
||||
}
|
||||
|
||||
func GetDERPConfig() headscale.DERPConfig {
|
||||
serverEnabled := viper.GetBool("derp.server.enabled")
|
||||
serverRegionID := viper.GetInt("derp.server.region_id")
|
||||
serverRegionCode := viper.GetString("derp.server.region_code")
|
||||
serverRegionName := viper.GetString("derp.server.region_name")
|
||||
stunEnabled := viper.GetBool("derp.server.stun.enabled")
|
||||
stunAddr := viper.GetString("derp.server.stun.listen_addr")
|
||||
|
||||
urlStrs := viper.GetStringSlice("derp.urls")
|
||||
|
||||
urls := make([]url.URL, len(urlStrs))
|
||||
@ -138,10 +145,16 @@ func GetDERPConfig() headscale.DERPConfig {
|
||||
updateFrequency := viper.GetDuration("derp.update_frequency")
|
||||
|
||||
return headscale.DERPConfig{
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
ServerEnabled: serverEnabled,
|
||||
ServerRegionID: serverRegionID,
|
||||
ServerRegionCode: serverRegionCode,
|
||||
ServerRegionName: serverRegionName,
|
||||
STUNEnabled: stunEnabled,
|
||||
STUNAddr: stunAddr,
|
||||
URLs: urls,
|
||||
Paths: paths,
|
||||
AutoUpdate: autoUpdate,
|
||||
UpdateFrequency: updateFrequency,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -55,6 +55,26 @@ ip_prefixes:
|
||||
# headscale needs a list of DERP servers that can be presented
|
||||
# to the clients.
|
||||
derp:
|
||||
server:
|
||||
# If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
|
||||
# The Headscale server_url defined above MUST use https, as DERP requires TLS to be in place
|
||||
enabled: false
|
||||
|
||||
# Region ID to use for the embedded DERP server.
|
||||
# The local DERP prevails if the region ID collides with another region ID coming from
|
||||
# the regular DERP config.
|
||||
region_id: 999
|
||||
|
||||
# Region code and name are displayed in the Tailscale UI to identify a DERP region
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
|
||||
# If enabled, also listens on UDP at the configured address for STUN connections to help with NAT traversal
|
||||
# For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
|
||||
stun:
|
||||
enabled: false
|
||||
listen_addr: "0.0.0.0:3478"
|
||||
|
||||
# List of externally available DERP maps encoded in JSON
|
||||
urls:
|
||||
- https://controlplane.tailscale.com/derpmap/default
|
||||
|
1
derp.go
@ -148,6 +148,7 @@ func (h *Headscale) scheduledDERPMapUpdateWorker(cancelChan <-chan struct{}) {
|
||||
case <-ticker.C:
|
||||
log.Info().Msg("Fetching DERPMap updates")
|
||||
h.DERPMap = GetDERPMap(h.cfg.DERP)
|
||||
h.DERPMap.Regions[h.DERPServer.region.RegionID] = &h.DERPServer.region
|
||||
|
||||
namespaces, err := h.ListNamespaces()
|
||||
if err != nil {
|
||||
|
233
derp_server.go
Normal file
@ -0,0 +1,233 @@
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/rs/zerolog/log"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/net/stun"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
// fastStartHeader is the header (with value "1") that signals to the HTTP
|
||||
// server that the DERP HTTP client does not want the HTTP 101 response
|
||||
// headers and it will begin writing & reading the DERP protocol immediately
|
||||
// following its HTTP request.
|
||||
const fastStartHeader = "Derp-Fast-Start"
|
||||
|
||||
type DERPServer struct {
|
||||
tailscaleDERP *derp.Server
|
||||
region tailcfg.DERPRegion
|
||||
}
|
||||
|
||||
func (h *Headscale) NewDERPServer() (*DERPServer, error) {
|
||||
server := derp.NewServer(key.NodePrivate(*h.privateKey), log.Info().Msgf)
|
||||
region, err := h.generateRegionLocalDERP()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &DERPServer{server, region}, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) generateRegionLocalDERP() (tailcfg.DERPRegion, error) {
|
||||
serverURL, err := url.Parse(h.cfg.ServerURL)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
var host string
|
||||
var port int
|
||||
host, portStr, err := net.SplitHostPort(serverURL.Host)
|
||||
if err != nil {
|
||||
if serverURL.Scheme == "https" {
|
||||
host = serverURL.Host
|
||||
port = 443
|
||||
} else {
|
||||
host = serverURL.Host
|
||||
port = 80
|
||||
}
|
||||
} else {
|
||||
port, err = strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
}
|
||||
|
||||
localDERPregion := tailcfg.DERPRegion{
|
||||
RegionID: h.cfg.DERP.ServerRegionID,
|
||||
RegionCode: h.cfg.DERP.ServerRegionCode,
|
||||
RegionName: h.cfg.DERP.ServerRegionName,
|
||||
Avoid: false,
|
||||
Nodes: []*tailcfg.DERPNode{
|
||||
{
|
||||
Name: fmt.Sprintf("%d", h.cfg.DERP.ServerRegionID),
|
||||
RegionID: h.cfg.DERP.ServerRegionID,
|
||||
HostName: host,
|
||||
DERPPort: port,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if h.cfg.DERP.STUNEnabled {
|
||||
_, portStr, err := net.SplitHostPort(h.cfg.DERP.STUNAddr)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return tailcfg.DERPRegion{}, err
|
||||
}
|
||||
localDERPregion.Nodes[0].STUNPort = port
|
||||
}
|
||||
|
||||
return localDERPregion, nil
|
||||
}
|
||||
|
||||
func (h *Headscale) DERPHandler(ctx *gin.Context) {
|
||||
log.Trace().Caller().Msgf("/derp request from %v", ctx.ClientIP())
|
||||
up := strings.ToLower(ctx.Request.Header.Get("Upgrade"))
|
||||
if up != "websocket" && up != "derp" {
|
||||
if up != "" {
|
||||
log.Warn().Caller().Msgf("Weird websockets connection upgrade: %q", up)
|
||||
}
|
||||
ctx.String(http.StatusUpgradeRequired, "DERP requires connection upgrade")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
fastStart := ctx.Request.Header.Get(fastStartHeader) == "1"
|
||||
|
||||
hijacker, ok := ctx.Writer.(http.Hijacker)
|
||||
if !ok {
|
||||
log.Error().Caller().Msg("DERP requires Hijacker interface from Gin")
|
||||
ctx.String(http.StatusInternalServerError, "HTTP does not support general TCP support")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
netConn, conn, err := hijacker.Hijack()
|
||||
if err != nil {
|
||||
log.Error().Caller().Err(err).Msgf("Hijack failed")
|
||||
ctx.String(http.StatusInternalServerError, "HTTP does not support general TCP support")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
if !fastStart {
|
||||
pubKey := h.privateKey.Public()
|
||||
pubKeyStr := pubKey.UntypedHexString() // nolint
|
||||
fmt.Fprintf(conn, "HTTP/1.1 101 Switching Protocols\r\n"+
|
||||
"Upgrade: DERP\r\n"+
|
||||
"Connection: Upgrade\r\n"+
|
||||
"Derp-Version: %v\r\n"+
|
||||
"Derp-Public-Key: %s\r\n\r\n",
|
||||
derp.ProtocolVersion,
|
||||
pubKeyStr)
|
||||
}
|
||||
|
||||
h.DERPServer.tailscaleDERP.Accept(netConn, conn, netConn.RemoteAddr().String())
|
||||
}
|
||||
|
||||
// DERPProbeHandler is the endpoint that js/wasm clients hit to measure
|
||||
// DERP latency, since they can't do UDP STUN queries.
|
||||
func (h *Headscale) DERPProbeHandler(ctx *gin.Context) {
|
||||
switch ctx.Request.Method {
|
||||
case "HEAD", "GET":
|
||||
ctx.Writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
default:
|
||||
ctx.String(http.StatusMethodNotAllowed, "bogus probe method")
|
||||
}
|
||||
}
|
||||
|
||||
// DERPBootstrapDNSHandler implements the /bootstrap-dns endpoint
|
||||
// Described in https://github.com/tailscale/tailscale/issues/1405,
|
||||
// this endpoint provides a way to help a client when it fails to start up
|
||||
// because its DNS is broken.
|
||||
// The initial implementation is here https://github.com/tailscale/tailscale/pull/1406
|
||||
// They have a cache, but not clear if that is really necessary at Headscale, uh, scale.
|
||||
// An example implementation is found here https://derp.tailscale.com/bootstrap-dns
|
||||
func (h *Headscale) DERPBootstrapDNSHandler(ctx *gin.Context) {
|
||||
dnsEntries := make(map[string][]net.IP)
|
||||
|
||||
resolvCtx, cancel := context.WithTimeout(context.Background(), time.Minute)
|
||||
defer cancel()
|
||||
var r net.Resolver
|
||||
for _, region := range h.DERPMap.Regions {
|
||||
for _, node := range region.Nodes { // we don't care if we override some nodes
|
||||
addrs, err := r.LookupIP(resolvCtx, "ip", node.HostName)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("bootstrap DNS lookup failed %q", node.HostName)
|
||||
|
||||
continue
|
||||
}
|
||||
dnsEntries[node.HostName] = addrs
|
||||
}
|
||||
}
|
||||
ctx.JSON(http.StatusOK, dnsEntries)
|
||||
}
|
||||
|
||||
// ServeSTUN starts a STUN server on the configured addr.
|
||||
func (h *Headscale) ServeSTUN() {
|
||||
packetConn, err := net.ListenPacket("udp", h.cfg.DERP.STUNAddr)
|
||||
if err != nil {
|
||||
log.Fatal().Msgf("failed to open STUN listener: %v", err)
|
||||
}
|
||||
log.Info().Msgf("STUN server started at %s", packetConn.LocalAddr())
|
||||
|
||||
udpConn, ok := packetConn.(*net.UDPConn)
|
||||
if !ok {
|
||||
log.Fatal().Msg("STUN listener is not a UDP listener")
|
||||
}
|
||||
serverSTUNListener(context.Background(), udpConn)
|
||||
}
|
||||
|
||||
func serverSTUNListener(ctx context.Context, packetConn *net.UDPConn) {
|
||||
var buf [64 << 10]byte
|
||||
var (
|
||||
bytesRead int
|
||||
udpAddr *net.UDPAddr
|
||||
err error
|
||||
)
|
||||
for {
|
||||
bytesRead, udpAddr, err = packetConn.ReadFromUDP(buf[:])
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
log.Error().Caller().Err(err).Msgf("STUN ReadFrom")
|
||||
time.Sleep(time.Second)
|
||||
|
||||
continue
|
||||
}
|
||||
log.Trace().Caller().Msgf("STUN request from %v", udpAddr)
|
||||
pkt := buf[:bytesRead]
|
||||
if !stun.Is(pkt) {
|
||||
log.Trace().Caller().Msgf("UDP packet is not STUN")
|
||||
|
||||
continue
|
||||
}
|
||||
txid, err := stun.ParseBindingRequest(pkt)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("STUN parse error")
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
res := stun.Response(txid, udpAddr.IP, uint16(udpAddr.Port))
|
||||
_, err = packetConn.WriteTo(res, udpAddr)
|
||||
if err != nil {
|
||||
log.Trace().Caller().Err(err).Msgf("Issue writing to UDP")
|
||||
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
362
docs/proposals/001-acls.md
Normal file
@ -0,0 +1,362 @@
|
||||
# ACLs
|
||||
|
||||
A key component of tailscale is the notion of a tailnet. This notion is hidden
|
||||
but the implications it has on how to use tailscale are not.
|
||||
|
||||
For tailscale, a [tailnet](https://tailscale.com/kb/1136/tailnet/) is the
|
||||
following:
|
||||
|
||||
> For personal users, you are a tailnet of many devices and one person. Each
|
||||
> device gets a private Tailscale IP address in the CGNAT range and every
|
||||
> device can talk directly to every other device, wherever they are on the
|
||||
> internet.
|
||||
>
|
||||
> For businesses and organizations, a tailnet is many devices and many users.
|
||||
> It can be based on your Microsoft Active Directory, your Google Workspace, a
|
||||
> GitHub organization, Okta tenancy, or other identity provider namespace. All
|
||||
> of the devices and users in your tailnet can be seen by the tailnet
|
||||
> administrators in the Tailscale admin console. There you can apply
|
||||
> tailnet-wide configuration, such as ACLs that affect visibility of devices
|
||||
> inside your tailnet, DNS settings, and more.
|
||||
|
||||
## Current implementation and issues
|
||||
|
||||
Currently in headscale, namespaces are used both as tailnets and as users. The
|
||||
issue is that if we want to use ACLs, we can't use both at the same time.
|
||||
|
||||
Tailnets cannot communicate with each other, so we can't have an ACL that
|
||||
authorizes tailnet (namespace) A to talk to tailnet (namespace) B.
|
||||
|
||||
We also can't write ACLs based on the users (namespaces in headscale) since all
|
||||
devices belong to the same user.
|
||||
|
||||
With the current implementation, the only ACL we can use is to associate
|
||||
each headscale IP with a host manually and then write the ACLs according to this
|
||||
manual mapping.
|
||||
|
||||
```json
|
||||
{
|
||||
"hosts": {
|
||||
"host1": "100.64.0.1",
|
||||
"server": "100.64.0.2"
|
||||
},
|
||||
"acls": [
|
||||
{ "action": "accept", "users": ["host1"], "ports": ["host2:80,443"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
While this works, it requires a lot of manual editing of the configuration and
|
||||
keeping track of every device's IP address.
|
||||
|
||||
## Proposition for a next implementation
|
||||
|
||||
In order to ease the use of ACLs, we need to split the tailnet and user
|
||||
notions.
|
||||
|
||||
A solution could be to consider a headscale server (in its entirety) as a
|
||||
tailnet.
|
||||
|
||||
For personal users the default behavior could either allow all communications
|
||||
between all namespaces (like tailscale) or disallow all communications between
|
||||
namespaces (current behavior).
|
||||
|
||||
For businesses and organisations, viewing a headscale instance as a single tailnet
|
||||
would allow users (namespaces) to talk to each other with ACLs. As described
|
||||
in tailscale's documentation [[1]], a server should be tagged and personal
|
||||
devices should be tied to a user. Translated into headscale's terms, each user can
|
||||
have multiple devices and all those devices should be in the same namespace.
|
||||
The servers should be tagged and used as such.
|
||||
|
||||
This implementation would render the sharing feature that is currently
|
||||
implemented useless, since an ACL could do the same. Offering only one user
|
||||
interface to do one thing is easier and less confusing for users.
|
||||
|
||||
To better suit the ACLs in this proposal, it's advised to consider that each
|
||||
namespace belongs to one person. This person can have multiple devices; they
|
||||
will all be considered the same user in the ACLs. The OIDC feature wouldn't need
|
||||
to map people to namespaces, just create a namespace if the person isn't
|
||||
registered yet.
|
||||
|
||||
As a sidenote, users would like to write ACLs as YAML. We should offer users
|
||||
the ability to write rules in either format (HuJSON or YAML).
|
||||
|
||||
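As a minimal sketch (not part of the headscale codebase), the format could be selected by file extension; `Policy` and `LoadPolicy` are hypothetical names, and plain `encoding/json` stands in here for a proper comment-tolerant HuJSON parser:

```go
package policy

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

// Policy is a hypothetical, trimmed-down ACL policy used only for illustration.
type Policy struct {
	Groups map[string][]string `json:"groups" yaml:"groups"`
	ACLs   []struct {
		Action string   `json:"action" yaml:"action"`
		Users  []string `json:"users"  yaml:"users"`
		Ports  []string `json:"ports"  yaml:"ports"`
	} `json:"acls" yaml:"acls"`
}

// LoadPolicy picks a parser based on the file extension. A real implementation
// would need a HuJSON parser (comments, trailing commas) instead of encoding/json.
func LoadPolicy(path string) (*Policy, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	var pol Policy
	switch filepath.Ext(path) {
	case ".yaml", ".yml":
		err = yaml.Unmarshal(data, &pol)
	case ".json", ".hujson":
		err = json.Unmarshal(data, &pol)
	default:
		err = fmt.Errorf("unsupported policy format: %s", path)
	}
	if err != nil {
		return nil, err
	}

	return &pol, nil
}
```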
[1]: https://tailscale.com/kb/1068/acl-tags/
|
||||
|
||||
## Example
|
||||
|
||||
Let's build an example use case for a small business (it may be the place where
|
||||
ACLs are the most useful).
|
||||
|
||||
We have a small company with a boss, an admin, two developers and an intern.
|
||||
|
||||
The boss should have access to all servers but not to the users' hosts. The admin
|
||||
should also have access to all hosts except that their permissions should be
|
||||
limited to maintaining the hosts (for example purposes). The developers can do
|
||||
anything they want on dev hosts, but only watch the production hosts. The intern
|
||||
can only interact with the development servers.
|
||||
|
||||
Each user has at least one device connected to the network, and we have some
|
||||
servers.
|
||||
|
||||
- database.prod
|
||||
- database.dev
|
||||
- app-server1.prod
|
||||
- app-server1.dev
|
||||
- billing.internal
|
||||
|
||||
### Current headscale implementation
|
||||
|
||||
Let's create some namespaces
|
||||
|
||||
```bash
|
||||
headscale namespaces create prod
|
||||
headscale namespaces create dev
|
||||
headscale namespaces create internal
|
||||
headscale namespaces create users
|
||||
|
||||
headscale nodes register -n users boss-computer
|
||||
headscale nodes register -n users admin1-computer
|
||||
headscale nodes register -n users dev1-computer
|
||||
headscale nodes register -n users dev1-phone
|
||||
headscale nodes register -n users dev2-computer
|
||||
headscale nodes register -n users intern1-computer
|
||||
|
||||
headscale nodes register -n prod database
|
||||
headscale nodes register -n prod app-server1
|
||||
|
||||
headscale nodes register -n dev database
|
||||
headscale nodes register -n dev app-server1
|
||||
|
||||
headscale nodes register -n internal billing
|
||||
|
||||
headscale nodes list
|
||||
ID | Name | Namespace | IP address
|
||||
1 | boss-computer | users | 100.64.0.1
|
||||
2 | admin1-computer | users | 100.64.0.2
|
||||
3 | dev1-computer | users | 100.64.0.3
|
||||
4 | dev1-phone | users | 100.64.0.4
|
||||
5 | dev2-computer | users | 100.64.0.5
|
||||
6 | intern1-computer | users | 100.64.0.6
|
||||
7 | database | prod | 100.64.0.7
|
||||
8 | app-server1 | prod | 100.64.0.8
|
||||
9 | database | dev | 100.64.0.9
|
||||
10 | app-server1 | dev | 100.64.0.10
|
||||
11 | billing | internal | 100.64.0.11
|
||||
```
|
||||
|
||||
In order to allow only the communications described above, we
|
||||
need to add the following ACLs:
|
||||
|
||||
```json
|
||||
{
|
||||
"hosts": {
|
||||
"boss-computer": "100.64.0.1",
|
||||
"admin1-computer": "100.64.0.2",
|
||||
"dev1-computer": "100.64.0.3",
|
||||
"dev1-phone": "100.64.0.4",
|
||||
"dev2-computer": "100.64.0.5",
|
||||
"intern1-computer": "100.64.0.6",
|
||||
"prod-app-server1": "100.64.0.8"
|
||||
},
|
||||
"groups": {
|
||||
"group:dev": ["dev1-computer", "dev1-phone", "dev2-computer"],
|
||||
"group:admin": ["admin1-computer"],
|
||||
"group:boss": ["boss-computer"],
|
||||
"group:intern": ["intern1-computer"]
|
||||
},
|
||||
"acls": [
|
||||
// the boss has access to all servers but not to users' hosts
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:boss"],
|
||||
"ports": ["prod:*", "dev:*", "internal:*"]
|
||||
},
|
||||
|
||||
// admins have access to the administration port (let's only consider port 22 here)
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:admin"],
|
||||
"ports": ["prod:22", "dev:22", "internal:22"]
|
||||
},
|
||||
|
||||
// devs can do anything on dev servers and only have web access on prod servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:dev"],
|
||||
"ports": ["dev:*", "prod-app-server1:80,443"]
|
||||
},
|
||||
|
||||
// interns only have access to ports 80 and 443 on dev servers (lame internship)
|
||||
{ "action": "accept", "users": ["group:intern"], "ports": ["dev:80,443"] },
|
||||
|
||||
// users can access their own devices
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["dev1-computer"],
|
||||
"ports": ["dev1-phone:*"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["dev1-phone"],
|
||||
"ports": ["dev1-computer:*"]
|
||||
},
|
||||
|
||||
// intra-namespace communications should still be allowed
|
||||
{ "action": "accept", "users": ["dev"], "ports": ["dev:*"] },
|
||||
{ "action": "accept", "users": ["prod"], "ports": ["prod:*"] },
|
||||
{ "action": "accept", "users": ["internal"], "ports": ["internal:*"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Since communication between namespaces isn't possible, we also have to share the
|
||||
devices between the namespaces.
|
||||
|
||||
```bash
|
||||
|
||||
# add boss host to the prod, dev and internal networks
|
||||
headscale nodes share -i 1 -n prod
|
||||
headscale nodes share -i 1 -n dev
|
||||
headscale nodes share -i 1 -n internal
|
||||
|
||||
# add admin computer to the prod, dev and internal networks
|
||||
headscale nodes share -i 2 -n prod
|
||||
headscale nodes share -i 2 -n dev
|
||||
headscale nodes share -i 2 -n internal
|
||||
|
||||
# add all dev devices to the prod and dev networks
|
||||
headscale nodes share -i 3 -n dev
|
||||
headscale nodes share -i 4 -n dev
|
||||
headscale nodes share -i 3 -n prod
|
||||
headscale nodes share -i 4 -n prod
|
||||
headscale nodes share -i 5 -n dev
|
||||
headscale nodes share -i 5 -n prod
|
||||
|
||||
headscale nodes share -i 6 -n dev
|
||||
```
|
||||
|
||||
This example network has not been tested, but it should work. Operating it could
|
||||
be quite tedious if the company grows. Each time a new user joins, we have to add
|
||||
them to a group, and share their devices with the correct namespaces. If the user wants
|
||||
multiple devices, we have to allow communication to each of them one by one. If
|
||||
the business reorganises, we may have to rewrite all ACLs
|
||||
and reorganise all namespaces.
|
||||
|
||||
If we add servers in production, we should also update the ACLs to allow dev
|
||||
access to certain categories of them (only app servers, for example).
|
||||
|
||||
### Example based on the proposal in this document
|
||||
|
||||
Let's create the namespaces
|
||||
|
||||
```bash
|
||||
headscale namespaces create boss
|
||||
headscale namespaces create admin1
|
||||
headscale namespaces create dev1
|
||||
headscale namespaces create dev2
|
||||
headscale namespaces create intern1
|
||||
```
|
||||
|
||||
We don't need to create namespaces for the servers because the servers will be
|
||||
tagged. When registering the servers we will need to add the flag
|
||||
`--advertised-tags=tag:<tag1>,tag:<tag2>`, and the user (namespace) that is
|
||||
registering the server should be allowed to do it. Since anyone can add tags to
|
||||
a server they register, the check of the tags is done on the headscale server
|
||||
and only valid tags are applied. A tag is valid if the namespace that is
|
||||
registering it is allowed to do it.
|
||||
|
||||
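To illustrate the idea (a sketch only, not headscale's actual implementation; the types and function name are hypothetical), validating a requested tag against `tagOwners` could look roughly like this:

```go
package policy

// TagOwners maps a tag (e.g. "tag:prod-databases") to the groups or
// namespaces allowed to apply it, mirroring the "tagOwners" section below.
type TagOwners map[string][]string

// Groups maps a group (e.g. "group:admin") to its member namespaces.
type Groups map[string][]string

// tagIsValid reports whether the given namespace may apply the requested tag:
// either the namespace is listed directly as an owner, or it belongs to one
// of the owning groups. Tags that fail this check would simply be dropped by
// the server during registration.
func tagIsValid(tag, namespace string, owners TagOwners, groups Groups) bool {
	for _, owner := range owners[tag] {
		if owner == namespace {
			return true
		}
		for _, member := range groups[owner] {
			if member == namespace {
				return true
			}
		}
	}

	return false
}
```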
Here are the ACLs to implement the same permissions as above:
|
||||
|
||||
```json
|
||||
{
|
||||
// groups are simpler and only list the namespace names
|
||||
"groups": {
|
||||
"group:boss": ["boss"],
|
||||
"group:dev": ["dev1", "dev2"],
|
||||
"group:admin": ["admin1"],
|
||||
"group:intern": ["intern1"]
|
||||
},
|
||||
"tagOwners": {
|
||||
// the administrators can add servers in production
|
||||
"tag:prod-databases": ["group:admin"],
|
||||
"tag:prod-app-servers": ["group:admin"],
|
||||
|
||||
// the boss can tag any server as internal
|
||||
"tag:internal": ["group:boss"],
|
||||
|
||||
// devs, as well as admins, can add servers for dev purposes
|
||||
"tag:dev-databases": ["group:admin", "group:dev"],
|
||||
"tag:dev-app-servers": ["group:admin", "group:dev"]
|
||||
|
||||
// interns cannot add servers
|
||||
},
|
||||
"acls": [
|
||||
// the boss has access to all servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:boss"],
|
||||
"ports": [
|
||||
"tag:prod-databases:*",
|
||||
"tag:prod-app-servers:*",
|
||||
"tag:internal:*",
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*"
|
||||
]
|
||||
},
|
||||
|
||||
// admins only have access to the administrative ports of the servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:admin"],
|
||||
"ports": [
|
||||
"tag:prod-databases:22",
|
||||
"tag:prod-app-servers:22",
|
||||
"tag:internal:22",
|
||||
"tag:dev-databases:22",
|
||||
"tag:dev-app-servers:22"
|
||||
]
|
||||
},
|
||||
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:dev"],
|
||||
"ports": [
|
||||
"tag:dev-databases:*",
|
||||
"tag:dev-app-servers:*",
|
||||
"tag:prod-app-servers:80,443"
|
||||
]
|
||||
},
|
||||
|
||||
// servers should be able to talk to the database; the database should not be able to initiate connections to the servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["tag:dev-app-servers"],
|
||||
"ports": ["tag:dev-databases:5432"]
|
||||
},
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["tag:prod-app-servers"],
|
||||
"ports": ["tag:prod-databases:5432"]
|
||||
},
|
||||
|
||||
// interns only have read (web) access to dev-app-servers
|
||||
{
|
||||
"action": "accept",
|
||||
"users": ["group:intern"],
|
||||
"ports": ["tag:dev-app-servers:80,443"]
|
||||
},
|
||||
|
||||
// we still have to allow intra-namespace communications since nothing guarantees that each user has their own namespace. This could be discussed further.
|
||||
{ "action": "accept", "users": ["boss"], "ports": ["boss:*"] },
|
||||
{ "action": "accept", "users": ["dev1"], "ports": ["dev1:*"] },
|
||||
{ "action": "accept", "users": ["dev2"], "ports": ["dev2:*"] },
|
||||
{ "action": "accept", "users": ["admin1"], "ports": ["admin1:*"] },
|
||||
{ "action": "accept", "users": ["intern1"], "ports": ["intern1:*"] }
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
With this implementation, the sharing step is not necessary. Maintenance cost
|
||||
of the ACL file is lower and less tedious (no need to map hostnames and IPs
|
||||
into it).
|
4
go.mod
@ -4,6 +4,7 @@ go 1.17
|
||||
|
||||
require (
|
||||
github.com/AlecAivazis/survey/v2 v2.3.2
|
||||
github.com/ccding/go-stun/stun v0.0.0-20200514191101-4dc67bcdb029
|
||||
github.com/coreos/go-oidc/v3 v3.1.0
|
||||
github.com/efekarakus/termcolor v1.0.1
|
||||
github.com/fatih/set v0.2.1
|
||||
@ -44,6 +45,7 @@ require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
|
||||
github.com/akutz/memconn v0.1.0 // indirect
|
||||
github.com/atomicgo/cursor v0.0.1 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
|
||||
@ -93,6 +95,7 @@ require (
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mitchellh/go-ps v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
@ -127,6 +130,7 @@ require (
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/ini.v1 v1.66.4 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
|
9
go.sum
@ -75,6 +75,8 @@ github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
|
||||
github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@ -95,6 +97,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/ccding/go-stun/stun v0.0.0-20200514191101-4dc67bcdb029 h1:POmUHfxXdeyM8Aomg4tKDcwATCFuW+cYLkj6pwsw9pc=
|
||||
github.com/ccding/go-stun/stun v0.0.0-20200514191101-4dc67bcdb029/go.mod h1:Rpr5n9cGHYdM3S3IK8ROSUUUYjQOu+MSUCZDcJbYWi8=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
@ -185,6 +189,7 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF
|
||||
github.com/fatih/set v0.2.1 h1:nn2CaJyknWE/6txyUDGwysr3G5QC6xWB/PtVjPBbeaA=
|
||||
github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
|
||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||
@ -505,6 +510,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju
|
||||
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
|
||||
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
|
||||
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
@ -992,6 +999,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
|
||||
golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
@ -6,6 +6,7 @@ package headscale
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ory/dockertest/v3"
|
||||
@ -18,8 +19,15 @@ const DOCKER_EXECUTE_TIMEOUT = 10 * time.Second
|
||||
var (
|
||||
IpPrefix4 = netaddr.MustParseIPPrefix("100.64.0.0/10")
|
||||
IpPrefix6 = netaddr.MustParseIPPrefix("fd7a:115c:a1e0::/48")
|
||||
|
||||
tailscaleVersions = []string{"1.22.0", "1.20.4", "1.18.2", "1.16.2", "1.14.3", "1.12.3"}
|
||||
)
|
||||
|
||||
type TestNamespace struct {
|
||||
count int
|
||||
tailscales map[string]dockertest.Resource
|
||||
}
|
||||
|
||||
type ExecuteCommandConfig struct {
|
||||
timeout time.Duration
|
||||
}
|
||||
@ -119,3 +127,35 @@ func DockerAllowNetworkAdministration(config *docker.HostConfig) {
|
||||
Target: "/dev/net/tun",
|
||||
})
|
||||
}
|
||||
|
||||
func getIPs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[string][]netaddr.IP, error) {
|
||||
ips := make(map[string][]netaddr.IP)
|
||||
for hostname, tailscale := range tailscales {
|
||||
command := []string{"tailscale", "ip"}
|
||||
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, address := range strings.Split(result, "\n") {
|
||||
address = strings.TrimSuffix(address, "\n")
|
||||
if len(address) < 1 {
|
||||
continue
|
||||
}
|
||||
ip, err := netaddr.ParseIP(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips[hostname] = append(ips[hostname], ip)
|
||||
}
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
396
integration_embedded_derp_test.go
Normal file
@ -0,0 +1,396 @@
|
||||
//go:build integration
|
||||
|
||||
package headscale
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
|
||||
"github.com/ory/dockertest/v3"
|
||||
"github.com/ory/dockertest/v3/docker"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/suite"
|
||||
|
||||
"github.com/ccding/go-stun/stun"
|
||||
)
|
||||
|
||||
const (
|
||||
headscaleHostname = "headscale-derp"
|
||||
namespaceName = "derpnamespace"
|
||||
totalContainers = 3
|
||||
)
|
||||
|
||||
type IntegrationDERPTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
|
||||
pool dockertest.Pool
|
||||
networks map[int]dockertest.Network // so we keep the containers isolated
|
||||
headscale dockertest.Resource
|
||||
|
||||
tailscales map[string]dockertest.Resource
|
||||
joinWaitGroup sync.WaitGroup
|
||||
}
|
||||
|
||||
func TestDERPIntegrationTestSuite(t *testing.T) {
|
||||
s := new(IntegrationDERPTestSuite)
|
||||
|
||||
s.tailscales = make(map[string]dockertest.Resource)
|
||||
s.networks = make(map[int]dockertest.Network)
|
||||
|
||||
suite.Run(t, s)
|
||||
|
||||
// HandleStats, which allows us to check if we passed and save logs
|
||||
// is called after TearDown, so we cannot tear down containers before
|
||||
// we have potentially saved the logs.
|
||||
for _, tailscale := range s.tailscales {
|
||||
if err := s.pool.Purge(&tailscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
if !s.stats.Passed() {
|
||||
err := s.saveLog(&s.headscale, "test_output")
|
||||
if err != nil {
|
||||
log.Printf("Could not save log: %s\n", err)
|
||||
}
|
||||
}
|
||||
if err := s.pool.Purge(&s.headscale); err != nil {
|
||||
log.Printf("Could not purge resource: %s\n", err)
|
||||
}
|
||||
|
||||
for _, network := range s.networks {
|
||||
if err := network.Close(); err != nil {
|
||||
log.Printf("Could not close network: %s\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) SetupSuite() {
|
||||
if ppool, err := dockertest.NewPool(""); err == nil {
|
||||
s.pool = *ppool
|
||||
} else {
|
||||
log.Fatalf("Could not connect to docker: %s", err)
|
||||
}
|
||||
|
||||
for i := 0; i < totalContainers; i++ {
|
||||
if pnetwork, err := s.pool.CreateNetwork(fmt.Sprintf("headscale-derp-%d", i)); err == nil {
|
||||
s.networks[i] = *pnetwork
|
||||
} else {
|
||||
log.Fatalf("Could not create network: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
headscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile",
|
||||
ContextDir: ".",
|
||||
}
|
||||
|
||||
currentPath, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("Could not determine current path: %s", err)
|
||||
}
|
||||
|
||||
headscaleOptions := &dockertest.RunOptions{
|
||||
Name: headscaleHostname,
|
||||
Mounts: []string{
|
||||
fmt.Sprintf("%s/integration_test/etc_embedded_derp:/etc/headscale", currentPath),
|
||||
},
|
||||
Cmd: []string{"headscale", "serve"},
|
||||
ExposedPorts: []string{"8443/tcp", "3478/udp"},
|
||||
PortBindings: map[docker.Port][]docker.PortBinding{
|
||||
"8443/tcp": {{HostPort: "8443"}},
|
||||
"3478/udp": {{HostPort: "3478"}},
|
||||
},
|
||||
}
|
||||
|
||||
log.Println("Creating headscale container")
|
||||
if pheadscale, err := s.pool.BuildAndRunWithBuildOptions(headscaleBuildOptions, headscaleOptions, DockerRestartPolicy); err == nil {
|
||||
s.headscale = *pheadscale
|
||||
} else {
|
||||
log.Fatalf("Could not start resource: %s", err)
|
||||
}
|
||||
log.Println("Created headscale container to test DERP")
|
||||
|
||||
log.Println("Creating tailscale containers")
|
||||
|
||||
for i := 0; i < totalContainers; i++ {
|
||||
version := tailscaleVersions[i%len(tailscaleVersions)]
|
||||
hostname, container := s.tailscaleContainer(
|
||||
fmt.Sprint(i),
|
||||
version,
|
||||
s.networks[i],
|
||||
)
|
||||
s.tailscales[hostname] = *container
|
||||
}
|
||||
|
||||
log.Println("Waiting for headscale to be ready")
|
||||
hostEndpoint := fmt.Sprintf("localhost:%s", s.headscale.GetPort("8443/tcp"))
|
||||
|
||||
if err := s.pool.Retry(func() error {
|
||||
url := fmt.Sprintf("https://%s/health", hostEndpoint)
|
||||
insecureTransport := http.DefaultTransport.(*http.Transport).Clone()
|
||||
insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
client := &http.Client{Transport: insecureTransport}
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("status code not OK")
|
||||
}
|
||||
|
||||
return nil
|
||||
}); err != nil {
|
||||
// TODO(kradalby): If we cannot access headscale, or any other fatal error during
|
||||
// test setup, we need to abort and tear down. However, testify does not seem to
|
||||
// support that at the moment:
|
||||
// https://github.com/stretchr/testify/issues/849
|
||||
return // fmt.Errorf("Could not connect to headscale: %s", err)
|
||||
}
|
||||
log.Println("headscale container is ready")
|
||||
|
||||
log.Printf("Creating headscale namespace: %s\n", namespaceName)
|
||||
result, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{"headscale", "namespaces", "create", namespaceName},
|
||||
[]string{},
|
||||
)
|
||||
log.Println("headscale create namespace result: ", result)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
log.Printf("Creating pre auth key for %s\n", namespaceName)
|
||||
preAuthResult, err := ExecuteCommand(
|
||||
&s.headscale,
|
||||
[]string{
|
||||
"headscale",
|
||||
"--namespace",
|
||||
namespaceName,
|
||||
"preauthkeys",
|
||||
"create",
|
||||
"--reusable",
|
||||
"--expiration",
|
||||
"24h",
|
||||
"--output",
|
||||
"json",
|
||||
},
|
||||
[]string{"LOG_LEVEL=error"},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
|
||||
var preAuthKey v1.PreAuthKey
|
||||
err = json.Unmarshal([]byte(preAuthResult), &preAuthKey)
|
||||
assert.Nil(s.T(), err)
|
||||
assert.True(s.T(), preAuthKey.Reusable)
|
||||
|
||||
headscaleEndpoint := fmt.Sprintf("https://headscale:%s", s.headscale.GetPort("8443/tcp"))
|
||||
|
||||
log.Printf(
|
||||
"Joining tailscale containers to headscale at %s\n",
|
||||
headscaleEndpoint,
|
||||
)
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
s.joinWaitGroup.Add(1)
|
||||
go s.Join(headscaleEndpoint, preAuthKey.Key, hostname, tailscale)
|
||||
}
|
||||
|
||||
s.joinWaitGroup.Wait()
|
||||
|
||||
// The nodes need a bit of time to get their updated maps from headscale
|
||||
// TODO: See if we can have a more deterministic wait here.
|
||||
time.Sleep(60 * time.Second)
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) Join(
|
||||
endpoint, key, hostname string,
|
||||
tailscale dockertest.Resource,
|
||||
) {
|
||||
defer s.joinWaitGroup.Done()
|
||||
|
||||
command := []string{
|
||||
"tailscale",
|
||||
"up",
|
||||
"-login-server",
|
||||
endpoint,
|
||||
"--authkey",
|
||||
key,
|
||||
"--hostname",
|
||||
hostname,
|
||||
}
|
||||
|
||||
log.Println("Join command:", command)
|
||||
log.Printf("Running join command for %s\n", hostname)
|
||||
_, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(s.T(), err)
|
||||
log.Printf("%s joined\n", hostname)
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) tailscaleContainer(identifier, version string, network dockertest.Network,
|
||||
) (string, *dockertest.Resource) {
|
||||
tailscaleBuildOptions := &dockertest.BuildOptions{
|
||||
Dockerfile: "Dockerfile.tailscale",
|
||||
ContextDir: ".",
|
||||
BuildArgs: []docker.BuildArg{
|
||||
{
|
||||
Name: "TAILSCALE_VERSION",
|
||||
Value: version,
|
||||
},
|
||||
},
|
||||
}
|
||||
hostname := fmt.Sprintf(
|
||||
"tailscale-%s-%s",
|
||||
strings.Replace(version, ".", "-", -1),
|
||||
identifier,
|
||||
)
|
||||
tailscaleOptions := &dockertest.RunOptions{
|
||||
Name: hostname,
|
||||
Networks: []*dockertest.Network{&network},
|
||||
Cmd: []string{
|
||||
"tailscaled", "--tun=tsdev",
|
||||
},
|
||||
|
||||
// expose the host IP address, so we can access it from inside the container
|
||||
ExtraHosts: []string{"host.docker.internal:host-gateway", "headscale:host-gateway"},
|
||||
}
|
||||
|
||||
pts, err := s.pool.BuildAndRunWithBuildOptions(
|
||||
tailscaleBuildOptions,
|
||||
tailscaleOptions,
|
||||
DockerRestartPolicy,
|
||||
DockerAllowLocalIPv6,
|
||||
DockerAllowNetworkAdministration,
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not start resource: %s", err)
|
||||
}
|
||||
log.Printf("Created %s container\n", hostname)
|
||||
|
||||
return hostname, pts
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TearDownSuite() {
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) HandleStats(
|
||||
suiteName string,
|
||||
stats *suite.SuiteInformation,
|
||||
) {
|
||||
s.stats = stats
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) saveLog(
|
||||
resource *dockertest.Resource,
|
||||
basePath string,
|
||||
) error {
|
||||
err := os.MkdirAll(basePath, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
err = s.pool.Client.Logs(
|
||||
docker.LogsOptions{
|
||||
Context: context.TODO(),
|
||||
Container: resource.Container.ID,
|
||||
OutputStream: &stdout,
|
||||
ErrorStream: &stderr,
|
||||
Tail: "all",
|
||||
RawTerminal: false,
|
||||
Stdout: true,
|
||||
Stderr: true,
|
||||
Follow: false,
|
||||
Timestamps: false,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Saving logs for %s to %s\n", resource.Container.Name, basePath)
|
||||
|
||||
err = ioutil.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stdout.log"),
|
||||
[]byte(stdout.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(
|
||||
path.Join(basePath, resource.Container.Name+".stderr.log"),
|
||||
[]byte(stderr.String()),
|
||||
0o644,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TestPingAllPeersByHostname() {
|
||||
ips, err := getIPs(s.tailscales)
|
||||
assert.Nil(s.T(), err)
|
||||
for hostname, tailscale := range s.tailscales {
|
||||
for peername := range ips {
|
||||
if peername == hostname {
|
||||
continue
|
||||
}
|
||||
s.T().Run(fmt.Sprintf("%s-%s", hostname, peername), func(t *testing.T) {
|
||||
command := []string{
|
||||
"tailscale", "ping",
|
||||
"--timeout=10s",
|
||||
"--c=5",
|
||||
"--until-direct=false",
|
||||
peername,
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"Pinging using hostname from %s to %s\n",
|
||||
hostname,
|
||||
peername,
|
||||
)
|
||||
log.Println(command)
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
assert.Nil(t, err)
|
||||
log.Printf("Result for %s: %s\n", hostname, result)
|
||||
assert.Contains(t, result, "via DERP(headscale)")
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *IntegrationDERPTestSuite) TestDERPSTUN() {
|
||||
headscaleSTUNAddr := fmt.Sprintf("localhost:%s", s.headscale.GetPort("3478/udp"))
|
||||
client := stun.NewClient()
|
||||
client.SetVerbose(true)
|
||||
client.SetVVerbose(true)
|
||||
client.SetServerAddr(headscaleSTUNAddr)
|
||||
_, _, err := client.Discover()
|
||||
assert.Nil(s.T(), err)
|
||||
}
|
@ -29,13 +29,6 @@ import (
|
||||
"tailscale.com/ipn/ipnstate"
|
||||
)
|
||||
|
||||
var tailscaleVersions = []string{"1.20.4", "1.18.2", "1.16.2", "1.14.3", "1.12.3"}
|
||||
|
||||
type TestNamespace struct {
|
||||
count int
|
||||
tailscales map[string]dockertest.Resource
|
||||
}
|
||||
|
||||
type IntegrationTestSuite struct {
|
||||
suite.Suite
|
||||
stats *suite.SuiteInformation
|
||||
@ -687,38 +680,6 @@ func (s *IntegrationTestSuite) TestMagicDNS() {
|
||||
}
|
||||
}
|
||||
|
||||
func getIPs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[string][]netaddr.IP, error) {
|
||||
ips := make(map[string][]netaddr.IP)
|
||||
for hostname, tailscale := range tailscales {
|
||||
command := []string{"tailscale", "ip"}
|
||||
|
||||
result, err := ExecuteCommand(
|
||||
&tailscale,
|
||||
command,
|
||||
[]string{},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, address := range strings.Split(result, "\n") {
|
||||
address = strings.TrimSuffix(address, "\n")
|
||||
if len(address) < 1 {
|
||||
continue
|
||||
}
|
||||
ip, err := netaddr.ParseIP(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ips[hostname] = append(ips[hostname], ip)
|
||||
}
|
||||
}
|
||||
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
func getAPIURLs(
|
||||
tailscales map[string]dockertest.Resource,
|
||||
) (map[netaddr.IP]string, error) {
|
||||
|
29
integration_test/etc_embedded_derp/config.yaml
Normal file
@ -0,0 +1,29 @@
|
||||
log_level: trace
|
||||
acl_policy_path: ""
|
||||
db_type: sqlite3
|
||||
ephemeral_node_inactivity_timeout: 30m
|
||||
ip_prefixes:
|
||||
- fd7a:115c:a1e0::/48
|
||||
- 100.64.0.0/10
|
||||
dns_config:
|
||||
base_domain: headscale.net
|
||||
magic_dns: true
|
||||
domains: []
|
||||
nameservers:
|
||||
- 1.1.1.1
|
||||
db_path: /tmp/integration_test_db.sqlite3
|
||||
private_key_path: private.key
|
||||
listen_addr: 0.0.0.0:8443
|
||||
server_url: https://headscale:8443
|
||||
tls_cert_path: "/etc/headscale/tls/server.crt"
|
||||
tls_key_path: "/etc/headscale/tls/server.key"
|
||||
tls_client_auth_mode: disabled
|
||||
derp:
|
||||
server:
|
||||
enabled: true
|
||||
region_id: 999
|
||||
region_code: "headscale"
|
||||
region_name: "Headscale Embedded DERP"
|
||||
stun:
|
||||
enabled: true
|
||||
listen_addr: "0.0.0.0:3478"
|
22
integration_test/etc_embedded_derp/tls/server.crt
Normal file
@ -0,0 +1,22 @@
|
||||
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC8jCCAdqgAwIBAgIULbu+UbSTMG/LtxooLLh7BgSEyqEwDQYJKoZIhvcNAQEL
|
||||
BQAwFDESMBAGA1UEAwwJaGVhZHNjYWxlMCAXDTIyMDMwNTE2NDgwM1oYDzI1MjEx
|
||||
MTA0MTY0ODAzWjAUMRIwEAYDVQQDDAloZWFkc2NhbGUwggEiMA0GCSqGSIb3DQEB
|
||||
AQUAA4IBDwAwggEKAoIBAQDqcfpToLZUF0rlNwXkkt3lbyw4Cl4TJdx36o2PKaOK
|
||||
U+tze/IjRsCWeMwrcR1o9TNZcxsD+c2J48D1WATuQJlMeg+2UJXGaTGRKkkbPMy3
|
||||
5m7AFf/Q16UEOgm2NYjZaQ8faRGIMYURG/6sXmNeETJvBixpBev9yKJuVXgqHNS4
|
||||
NpEkNwdOCuAZXrmw0HCbiusawJOay4tFvhH14rav8Uimonl8UTNVXufMzyUOuoaQ
|
||||
TGflmzYX3hIoswRnTPlIWFoqObvx2Q8H+of3uQJXy0m8I6OrIoXLNxnqYMfFls79
|
||||
9SYgVc2jPsCbh5fwyRbx2Hof7sIZ1K/mNgxJRG1E3ZiLAgMBAAGjOjA4MBQGA1Ud
|
||||
EQQNMAuCCWhlYWRzY2FsZTALBgNVHQ8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUH
|
||||
AwEwDQYJKoZIhvcNAQELBQADggEBANGlVN7NCsJaKz0k0nhlRGK+tcxn2p1PXN/i
|
||||
Iy+JX8ahixPC4ocRwOhrXgb390ZXLLwq08HrWYRB/Wi1VUzCp5d8dVxvrR43dJ+v
|
||||
L2EOBiIKgcu2C3pWW1qRR46/EoXUU9kSH2VNBvIhNufi32kEOidoDzxtQf6qVCoF
|
||||
guUt1JkAqrynv1UvR/2ZRM/WzM/oJ8qfECwrwDxyYhkqU5Z5jCWg0C6kPIBvNdzt
|
||||
B0eheWS+ZxVwkePTR4e17kIafwknth3lo+orxVrq/xC+OVM1bGrt2ZyD64ZvEqQl
|
||||
w6kgbzBdLScAQptWOFThwhnJsg0UbYKimZsnYmjVEuN59TJv92M=
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
(Expires on Nov 4 16:48:03 2521 GMT)
|
||||
|
28
integration_test/etc_embedded_derp/tls/server.key
Normal file
@ -0,0 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDqcfpToLZUF0rl
|
||||
NwXkkt3lbyw4Cl4TJdx36o2PKaOKU+tze/IjRsCWeMwrcR1o9TNZcxsD+c2J48D1
|
||||
WATuQJlMeg+2UJXGaTGRKkkbPMy35m7AFf/Q16UEOgm2NYjZaQ8faRGIMYURG/6s
|
||||
XmNeETJvBixpBev9yKJuVXgqHNS4NpEkNwdOCuAZXrmw0HCbiusawJOay4tFvhH1
|
||||
4rav8Uimonl8UTNVXufMzyUOuoaQTGflmzYX3hIoswRnTPlIWFoqObvx2Q8H+of3
|
||||
uQJXy0m8I6OrIoXLNxnqYMfFls799SYgVc2jPsCbh5fwyRbx2Hof7sIZ1K/mNgxJ
|
||||
RG1E3ZiLAgMBAAECggEBALu1Ni/u5Qy++YA8ZcN0s6UXNdhItLmv/q0kZuLQ+9et
|
||||
CT8VZfFInLndTdsaXenDKLHdryunviFA8SV+q7P2lMbek+Xs735EiyMnMBFWxLIZ
|
||||
FWNGOeQERGL19QCmLEOmEi2b+iWJQHlKaMWpbPXL3w11a+lKjIBNO4ALfoJ5QveZ
|
||||
cGMKsJdm/mpqBvLeNeh2eAFk3Gp6sT1g80Ge8NkgyzFBNIqnut0eerM15kPTc6Qz
|
||||
12JLaOXUuV3PrcB4PN4nOwrTDg88GDNOQtc1Pc9r4nOHyLfr8X7QEtj1wXSwmOuK
|
||||
d6ynMnAmoxVA9wEnupLbil1bzohRzpsTpkmDruYaBEECgYEA/Z09I8D6mt2NVqIE
|
||||
KyvLjBK39ijSV9r3/lvB2Ple2OOL5YQEd+yTrIFy+3zdUnDgD1zmNnXjmjvHZ9Lc
|
||||
IFf2o06AF84QLNB5gLPdDQkGNFdDqUxljBrfAfE3oANmPS/B0SijMGOOOiDO2FtO
|
||||
xl1nfRr78mswuRs9awoUWCdNRKUCgYEA7KaTYKIQW/FEjw9lshp74q5vbn6zoXF5
|
||||
7N8VkwI+bBVNvRbM9XZ8qhfgRdu9eXs5oL/N4mSYY54I8fA//pJ0Z2vpmureMm1V
|
||||
mL5WBUmSD9DIbAchoK+sRiQhVmNMBQC6cHMABA7RfXvBeGvWrm9pKCS6ZLgLjkjp
|
||||
PsmAcaXQcW8CgYEA2inAxljjOwUK6FNGsrxhxIT1qtNC3kCGxE+6WSNq67gSR8Vg
|
||||
8qiX//T7LEslOB3RIGYRwxd2St7RkgZZRZllmOWWWuPwFhzf6E7RAL2akLvggGov
|
||||
kG4tGEagSw2hjVDfsUT73ExHtMk0Jfmlsg33UC8+PDLpHtLH6qQpDAwC8+ECgYEA
|
||||
o+AqOIWhvHmT11l7O915Ip1WzvZwYADbxLsrDnVEUsZh4epTHjvh0kvcY6PqTqCV
|
||||
ZIrOANNWb811Nkz/k8NJVoD08PFp0xPBbZeIq/qpachTsfMyRzq/mobUiyUR9Hjv
|
||||
ooUQYr78NOApNsG+lWbTNBhS9wI4BlzZIECbcJe5g4MCgYEAndRoy8S+S0Hx/S8a
|
||||
O3hzXeDmivmgWqn8NVD4AKOovpkz4PaIVVQbAQkiNfAx8/DavPvjEKAbDezJ4ECV
|
||||
j7IsOWtDVI7pd6eF9fTcECwisrda8aUoiOap8AQb48153Vx+g2N4Vy3uH0xJs4cz
|
||||
TDALZPOBg8VlV+HEFDP43sp9Bf0=
|
||||
-----END PRIVATE KEY-----
|