From 83c104652d89717731774a2c7c95b4e47cc41383 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 24 Feb 2025 20:11:14 -0800 Subject: [PATCH 01/87] cmd/derper: add --socket flag to change unix socket path to tailscaled Fixes #10359 Change-Id: Ide49941c486d29856841016686827316878c9433 Signed-off-by: Brad Fitzpatrick --- cmd/derper/derper.go | 4 ++++ derp/derp_server.go | 17 +++++++++++++---- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 980870847..682ec0bba 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -71,10 +71,13 @@ var ( secretsCacheDir = flag.String("secrets-cache-dir", defaultSetecCacheDir(), "directory to cache setec secrets in (required if --secrets-url is set)") bootstrapDNS = flag.String("bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns") unpublishedDNS = flag.String("unpublished-bootstrap-dns-names", "", "optional comma-separated list of hostnames to make available at /bootstrap-dns and not publish in the list. 
If an entry contains a slash, the second part names a DNS record to poll for its TXT record with a `0` to `100` value for rollout percentage.") + verifyClients = flag.Bool("verify-clients", false, "verify clients to this DERP server through a local tailscaled instance.") verifyClientURL = flag.String("verify-client-url", "", "if non-empty, an admission controller URL for permitting client connections; see tailcfg.DERPAdmitClientRequest") verifyFailOpen = flag.Bool("verify-client-url-fail-open", true, "whether we fail open if --verify-client-url is unreachable") + socket = flag.String("socket", "", "optional alternate path to tailscaled socket (only relevant when using --verify-clients)") + acceptConnLimit = flag.Float64("accept-connection-limit", math.Inf(+1), "rate limit for accepting new connection") acceptConnBurst = flag.Int("accept-connection-burst", math.MaxInt, "burst limit for accepting new connection") @@ -192,6 +195,7 @@ func main() { s := derp.NewServer(cfg.PrivateKey, log.Printf) s.SetVerifyClient(*verifyClients) + s.SetTailscaledSocketPath(*socket) s.SetVerifyClientURL(*verifyClientURL) s.SetVerifyClientURLFailOpen(*verifyFailOpen) s.SetTCPWriteTimeout(*tcpWriteTimeout) diff --git a/derp/derp_server.go b/derp/derp_server.go index baca898d3..c330572d2 100644 --- a/derp/derp_server.go +++ b/derp/derp_server.go @@ -137,6 +137,7 @@ type Server struct { metaCert []byte // the encoded x509 cert to send after LetsEncrypt cert+intermediate dupPolicy dupPolicy debug bool + localClient local.Client // Counters: packetsSent, bytesSent expvar.Int @@ -485,6 +486,16 @@ func (s *Server) SetVerifyClientURLFailOpen(v bool) { s.verifyClientsURLFailOpen = v } +// SetTailscaledSocketPath sets the unix socket path to use to talk to +// tailscaled if client verification is enabled. +// +// If unset or set to the empty string, the default path for the operating +// system is used. 
+func (s *Server) SetTailscaledSocketPath(path string) { + s.localClient.Socket = path + s.localClient.UseSocketOnly = path != "" +} + // SetTCPWriteTimeout sets the timeout for writing to connected clients. // This timeout does not apply to mesh connections. // Defaults to 2 seconds. @@ -1320,8 +1331,6 @@ func (c *sclient) requestMeshUpdate() { } } -var localClient local.Client - // isMeshPeer reports whether the client is a trusted mesh peer // node in the DERP region. func (s *Server) isMeshPeer(info *clientInfo) bool { @@ -1340,7 +1349,7 @@ func (s *Server) verifyClient(ctx context.Context, clientKey key.NodePublic, inf // tailscaled-based verification: if s.verifyClientsLocalTailscaled { - _, err := localClient.WhoIsNodeKey(ctx, clientKey) + _, err := s.localClient.WhoIsNodeKey(ctx, clientKey) if err == tailscale.ErrPeerNotFound { return fmt.Errorf("peer %v not authorized (not found in local tailscaled)", clientKey) } @@ -2240,7 +2249,7 @@ func (s *Server) ConsistencyCheck() error { func (s *Server) checkVerifyClientsLocalTailscaled() error { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - status, err := localClient.StatusWithoutPeers(ctx) + status, err := s.localClient.StatusWithoutPeers(ctx) if err != nil { return fmt.Errorf("localClient.Status: %w", err) } From d7508b24c64162e915e9f2c3da1052ac1d9f1ff2 Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Tue, 25 Feb 2025 08:39:56 -0800 Subject: [PATCH 02/87] go.mod: bump golang.org/x/crypto (#15123) There were two recent CVEs. The one that sorta affects us is https://groups.google.com/g/golang-announce/c/qN_GDasRQSA (SSH DoS). 
Updates #15124 Signed-off-by: Andrew Lytvynov --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5aeefc9c9..04264f9ce 100644 --- a/go.mod +++ b/go.mod @@ -93,7 +93,7 @@ require ( go.uber.org/zap v1.27.0 go4.org/mem v0.0.0-20240501181205-ae6ca9944745 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba - golang.org/x/crypto v0.33.0 + golang.org/x/crypto v0.35.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/mod v0.23.0 golang.org/x/net v0.35.0 diff --git a/go.sum b/go.sum index be5fc57bc..00a45edb9 100644 --- a/go.sum +++ b/go.sum @@ -1041,8 +1041,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= -golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= From 820bdb870a414d9a5d2131f80649d0fa98a74819 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 14 Feb 2025 18:36:24 -0800 Subject: [PATCH 03/87] maths: add exponentially weighted moving average type In order to improve latency tracking, we will use an exponentially weighted moving average that will smooth change over time and suppress large outlier 
values. Updates tailscale/corp#26649 Signed-off-by: James Tucker --- maths/ewma.go | 72 ++++++++++++++++++ maths/ewma_test.go | 178 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 250 insertions(+) create mode 100644 maths/ewma.go create mode 100644 maths/ewma_test.go diff --git a/maths/ewma.go b/maths/ewma.go new file mode 100644 index 000000000..0897b73e4 --- /dev/null +++ b/maths/ewma.go @@ -0,0 +1,72 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package maths contains additional mathematical functions or structures not +// found in the standard library. +package maths + +import ( + "math" + "time" +) + +// EWMA is an exponentially weighted moving average supporting updates at +// irregular intervals with at most nanosecond resolution. +// The zero value will compute a half-life of 1 second. +// It is not safe for concurrent use. +// TODO(raggi): de-duplicate with tstime/rate.Value, which has a more complex +// and synchronized interface and does not provide direct access to the stable +// value. +type EWMA struct { + value float64 // current value of the average + lastTime int64 // time of last update in unix nanos + halfLife float64 // half-life in seconds +} + +// NewEWMA creates a new EWMA with the specified half-life. If halfLifeSeconds +// is 0, it defaults to 1. +func NewEWMA(halfLifeSeconds float64) *EWMA { + return &EWMA{ + halfLife: halfLifeSeconds, + } +} + +// Update adds a new sample to the average. If t is zero or precedes the last +// update, the update is ignored. 
+func (e *EWMA) Update(value float64, t time.Time) { + if t.IsZero() { + return + } + hl := e.halfLife + if hl == 0 { + hl = 1 + } + tn := t.UnixNano() + if e.lastTime == 0 { + e.value = value + e.lastTime = tn + return + } + + dt := (time.Duration(tn-e.lastTime) * time.Nanosecond).Seconds() + if dt < 0 { + // drop out of order updates + return + } + + // decay = 2^(-dt/halfLife) + decay := math.Exp2(-dt / hl) + e.value = e.value*decay + value*(1-decay) + e.lastTime = tn +} + +// Get returns the current value of the average +func (e *EWMA) Get() float64 { + return e.value +} + +// Reset clears the EWMA to its initial state +func (e *EWMA) Reset() { + e.value = 0 + e.lastTime = 0 +} diff --git a/maths/ewma_test.go b/maths/ewma_test.go new file mode 100644 index 000000000..307078a38 --- /dev/null +++ b/maths/ewma_test.go @@ -0,0 +1,178 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package maths + +import ( + "slices" + "testing" + "time" +) + +// some real world latency samples. 
+var ( + latencyHistory1 = []int{ + 14, 12, 15, 6, 19, 12, 13, 13, 13, 16, 17, 11, 17, 11, 14, 15, 14, 15, + 16, 16, 17, 14, 12, 16, 18, 14, 14, 11, 15, 15, 25, 11, 15, 14, 12, 15, + 13, 12, 13, 15, 11, 13, 15, 14, 14, 15, 12, 15, 18, 12, 15, 22, 12, 13, + 10, 14, 16, 15, 16, 11, 14, 17, 18, 20, 16, 11, 16, 14, 5, 15, 17, 12, + 15, 11, 15, 20, 12, 17, 12, 17, 15, 12, 12, 11, 14, 15, 11, 20, 14, 13, + 11, 12, 13, 13, 11, 13, 11, 15, 13, 13, 14, 12, 11, 12, 12, 14, 11, 13, + 12, 12, 12, 19, 14, 13, 13, 14, 11, 12, 10, 11, 15, 12, 14, 11, 11, 14, + 14, 12, 12, 11, 14, 12, 11, 12, 14, 11, 12, 15, 12, 14, 12, 12, 21, 16, + 21, 12, 16, 9, 11, 16, 14, 13, 14, 12, 13, 16, + } + latencyHistory2 = []int{ + 18, 20, 21, 21, 20, 23, 18, 18, 20, 21, 20, 19, 22, 18, 20, 20, 19, 21, + 21, 22, 22, 19, 18, 22, 22, 19, 20, 17, 16, 11, 25, 16, 18, 21, 17, 22, + 19, 18, 22, 21, 20, 18, 22, 17, 17, 20, 19, 10, 19, 16, 19, 25, 17, 18, + 15, 20, 21, 20, 23, 22, 22, 22, 19, 22, 22, 17, 22, 20, 20, 19, 21, 22, + 20, 19, 17, 22, 16, 16, 20, 22, 17, 19, 21, 16, 20, 22, 19, 21, 20, 19, + 13, 14, 23, 19, 16, 10, 19, 15, 15, 17, 16, 18, 14, 16, 18, 22, 20, 18, + 18, 21, 15, 19, 18, 19, 18, 20, 17, 19, 21, 19, 20, 19, 20, 20, 17, 14, + 17, 17, 18, 21, 20, 18, 18, 17, 16, 17, 17, 20, 22, 19, 20, 21, 21, 20, + 21, 24, 20, 18, 12, 17, 18, 17, 19, 19, 19, + } +) + +func TestEWMALatencyHistory(t *testing.T) { + type result struct { + t time.Time + v float64 + s int + } + + for _, latencyHistory := range [][]int{latencyHistory1, latencyHistory2} { + startTime := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + halfLife := 30.0 + + ewma := NewEWMA(halfLife) + + var results []result + sum := 0.0 + for i, latency := range latencyHistory { + t := startTime.Add(time.Duration(i) * time.Second) + ewma.Update(float64(latency), t) + sum += float64(latency) + + results = append(results, result{t, ewma.Get(), latency}) + } + mean := sum / float64(len(latencyHistory)) + min := float64(slices.Min(latencyHistory)) + 
max := float64(slices.Max(latencyHistory)) + + t.Logf("EWMA Latency History (half-life: %.1f seconds):", halfLife) + t.Logf("Mean latency: %.2f ms", mean) + t.Logf("Range: [%.1f, %.1f]", min, max) + + t.Log("Samples: ") + sparkline := []rune("▁▂▃▄▅▆▇█") + var sampleLine []rune + for _, r := range results { + idx := int(((float64(r.s) - min) / (max - min)) * float64(len(sparkline)-1)) + if idx >= len(sparkline) { + idx = len(sparkline) - 1 + } + sampleLine = append(sampleLine, sparkline[idx]) + } + t.Log(string(sampleLine)) + + t.Log("EWMA: ") + var ewmaLine []rune + for _, r := range results { + idx := int(((r.v - min) / (max - min)) * float64(len(sparkline)-1)) + if idx >= len(sparkline) { + idx = len(sparkline) - 1 + } + ewmaLine = append(ewmaLine, sparkline[idx]) + } + t.Log(string(ewmaLine)) + t.Log("") + + t.Logf("Time | Sample | Value | Value - Sample") + t.Logf("") + + for _, result := range results { + t.Logf("%10s | % 6d | % 5.2f | % 5.2f", result.t.Format("15:04:05"), result.s, result.v, result.v-float64(result.s)) + } + + // check that all results are greater than the min, and less than the max of the input, + // and they're all close to the mean. 
+ for _, result := range results { + if result.v < float64(min) || result.v > float64(max) { + t.Errorf("result %f out of range [%f, %f]", result.v, min, max) + } + + if result.v < mean*0.9 || result.v > mean*1.1 { + t.Errorf("result %f not close to mean %f", result.v, mean) + } + } + } +} + +func TestHalfLife(t *testing.T) { + start := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + ewma := NewEWMA(30.0) + ewma.Update(10, start) + ewma.Update(0, start.Add(30*time.Second)) + + if ewma.Get() != 5 { + t.Errorf("expected 5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(60*time.Second)) + if ewma.Get() != 7.5 { + t.Errorf("expected 7.5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(90*time.Second)) + if ewma.Get() != 8.75 { + t.Errorf("expected 8.75, got %f", ewma.Get()) + } +} + +func TestZeroValue(t *testing.T) { + start := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + var ewma EWMA + ewma.Update(10, start) + ewma.Update(0, start.Add(time.Second)) + + if ewma.Get() != 5 { + t.Errorf("expected 5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(2*time.Second)) + if ewma.Get() != 7.5 { + t.Errorf("expected 7.5, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(3*time.Second)) + if ewma.Get() != 8.75 { + t.Errorf("expected 8.75, got %f", ewma.Get()) + } +} + +func TestReset(t *testing.T) { + start := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + + ewma := NewEWMA(30.0) + ewma.Update(10, start) + ewma.Update(0, start.Add(30*time.Second)) + + if ewma.Get() != 5 { + t.Errorf("expected 5, got %f", ewma.Get()) + } + + ewma.Reset() + + if ewma.Get() != 0 { + t.Errorf("expected 0, got %f", ewma.Get()) + } + + ewma.Update(10, start.Add(90*time.Second)) + if ewma.Get() != 10 { + t.Errorf("expected 10, got %f", ewma.Get()) + } +} From c174d3c795a906214cf6bd63ffc3618555296db5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 26 Feb 2025 09:02:40 -0800 Subject: [PATCH 04/87] scripts/installer.sh: ensure default umask for the installer (#15139) 
Ensures default Linux umask 022 for the installer script to make sure that files created by the installer can be accessed by other tools, such as apt. Updates tailscale/tailscale#15133 Signed-off-by: Irbe Krumina --- scripts/installer.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/installer.sh b/scripts/installer.sh index 3bd392b93..388dd5a56 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -7,6 +7,14 @@ set -eu +# Ensure that this script runs with the default umask for Linux. In practice, +# this means that files created by this script (such as keyring files) will be +# created with 644 permissions. This ensures that keyrings and other files +# created by this script are readable by installers on systems where the +# umask is set to a more restrictive value. +# See https://github.com/tailscale/tailscale/issues/15133 +umask 022 + # All the code is wrapped in a main function that gets called at the # bottom of the file, so that a truncated partial download doesn't end # up executing half a script. From ae303d41dd1850b4306848a5ada87ea8b14a088d Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 27 Feb 2025 11:35:54 -0800 Subject: [PATCH 05/87] go.mod: bump github.com/go-json-experiment/json (#15010) The upstream module has seen significant work making the v1 emulation layer a high fidelity re-implementation of v1 "encoding/json". 
This addresses several upstream breaking changes: * MarshalJSONV2 renamed as MarshalJSONTo * UnmarshalJSONV2 renamed as UnmarshalJSONFrom * Options argument removed from MarshalJSONV2 * Options argument removed from UnmarshalJSONV2 Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- go.mod | 2 +- go.sum | 4 +-- types/opt/value.go | 16 +++++------ types/prefs/item.go | 16 +++++------ types/prefs/list.go | 16 +++++------ types/prefs/map.go | 16 +++++------ types/prefs/prefs.go | 16 +++++------ types/prefs/prefs_example/prefs_types.go | 20 +++++++------- types/prefs/prefs_test.go | 16 +++++------ types/prefs/struct_list.go | 16 +++++------ types/prefs/struct_map.go | 16 +++++------ util/syspolicy/internal/internal.go | 4 +-- util/syspolicy/setting/origin.go | 16 +++++------ util/syspolicy/setting/raw_item.go | 34 ++++++++++++------------ util/syspolicy/setting/snapshot.go | 16 +++++------ util/syspolicy/setting/summary.go | 16 +++++------ 16 files changed, 120 insertions(+), 120 deletions(-) diff --git a/go.mod b/go.mod index 04264f9ce..e6f3141a0 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/frankban/quicktest v1.14.6 github.com/fxamacker/cbor/v2 v2.7.0 github.com/gaissmai/bart v0.18.0 - github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 + github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 github.com/go-logr/zapr v1.3.0 github.com/go-ole/go-ole v1.3.0 github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 diff --git a/go.sum b/go.sum index 00a45edb9..0c8704674 100644 --- a/go.sum +++ b/go.sum @@ -327,8 +327,8 @@ github.com/go-git/go-git/v5 v5.13.1/go.mod h1:qryJB4cSBoq3FRoBRf5A77joojuBcmPJ0q github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= -github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= +github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874 h1:F8d1AJ6M9UQCavhwmO6ZsrYLfG8zVFWfEfMS2MXPkSY= +github.com/go-json-experiment/json v0.0.0-20250223041408-d3c622f1b874/go.mod h1:TiCD2a1pcmjd7YnhGH0f/zKNcCD06B029pHhzV23c2M= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= diff --git a/types/opt/value.go b/types/opt/value.go index b47b03c81..c71c53e51 100644 --- a/types/opt/value.go +++ b/types/opt/value.go @@ -100,31 +100,31 @@ func (o Value[T]) Equal(v Value[T]) bool { return false } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (o Value[T]) MarshalJSONV2(enc *jsontext.Encoder, opts jsonv2.Options) error { +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (o Value[T]) MarshalJSONTo(enc *jsontext.Encoder) error { if !o.set { return enc.WriteToken(jsontext.Null) } - return jsonv2.MarshalEncode(enc, &o.value, opts) + return jsonv2.MarshalEncode(enc, &o.value) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (o *Value[T]) UnmarshalJSONV2(dec *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (o *Value[T]) UnmarshalJSONFrom(dec *jsontext.Decoder) error { if dec.PeekKind() == 'n' { *o = Value[T]{} _, err := dec.ReadToken() // read null return err } o.set = true - return jsonv2.UnmarshalDecode(dec, &o.value, opts) + return jsonv2.UnmarshalDecode(dec, &o.value) } // MarshalJSON implements [json.Marshaler]. 
func (o Value[T]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(o) // uses MarshalJSONV2 + return jsonv2.Marshal(o) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (o *Value[T]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, o) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, o) // uses UnmarshalJSONFrom } diff --git a/types/prefs/item.go b/types/prefs/item.go index 103204147..717a0c76c 100644 --- a/types/prefs/item.go +++ b/types/prefs/item.go @@ -152,15 +152,15 @@ func (iv ItemView[T, V]) Equal(iv2 ItemView[T, V]) bool { return iv.ж.Equal(*iv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (iv ItemView[T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return iv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (iv ItemView[T, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return iv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (iv *ItemView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (iv *ItemView[T, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x Item[T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } iv.ж = &x @@ -169,10 +169,10 @@ func (iv *ItemView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Opti // MarshalJSON implements [json.Marshaler]. func (iv ItemView[T, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(iv) // uses MarshalJSONV2 + return jsonv2.Marshal(iv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. 
func (iv *ItemView[T, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, iv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, iv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/list.go b/types/prefs/list.go index 9830e79de..e9c1a1f33 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -157,15 +157,15 @@ func (lv ListView[T]) Equal(lv2 ListView[T]) bool { return lv.ж.Equal(*lv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (lv ListView[T]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return lv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (lv ListView[T]) MarshalJSONTo(out *jsontext.Encoder) error { + return lv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (lv *ListView[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (lv *ListView[T]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x List[T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } lv.ж = &x @@ -174,10 +174,10 @@ func (lv *ListView[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options // MarshalJSON implements [json.Marshaler]. func (lv ListView[T]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(lv) // uses MarshalJSONV2 + return jsonv2.Marshal(lv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (lv *ListView[T]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/map.go b/types/prefs/map.go index 2bd32bfbd..4b64690ed 100644 --- a/types/prefs/map.go +++ b/types/prefs/map.go @@ -133,15 +133,15 @@ func (mv MapView[K, V]) Equal(mv2 MapView[K, V]) bool { return mv.ж.Equal(*mv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
-func (mv MapView[K, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return mv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (mv MapView[K, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return mv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (mv *MapView[K, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (mv *MapView[K, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x Map[K, V] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } mv.ж = &x @@ -150,10 +150,10 @@ func (mv *MapView[K, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Optio // MarshalJSON implements [json.Marshaler]. func (mv MapView[K, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(mv) // uses MarshalJSONV2 + return jsonv2.Marshal(mv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (mv *MapView[K, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index 4f7902077..52cb464b6 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -158,22 +158,22 @@ func (p *preference[T]) SetReadOnly(readonly bool) { p.s.Metadata.ReadOnly = readonly } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (p preference[T]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &p.s, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (p preference[T]) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &p.s) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. 
-func (p *preference[T]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &p.s, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (p *preference[T]) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &p.s) } // MarshalJSON implements [json.Marshaler]. func (p preference[T]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(p) // uses MarshalJSONV2 + return jsonv2.Marshal(p) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (p *preference[T]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONFrom } diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go index 49f0d8c3c..f88c29f94 100644 --- a/types/prefs/prefs_example/prefs_types.go +++ b/types/prefs/prefs_example/prefs_types.go @@ -48,10 +48,10 @@ import ( // the `omitzero` JSON tag option. This option is not supported by the // [encoding/json] package as of 2024-08-21; see golang/go#45669. // It is recommended that a prefs type implements both -// [jsonv2.MarshalerV2]/[jsonv2.UnmarshalerV2] and [json.Marshaler]/[json.Unmarshaler] +// [jsonv2.MarshalerTo]/[jsonv2.UnmarshalerFrom] and [json.Marshaler]/[json.Unmarshaler] // to ensure consistent and more performant marshaling, regardless of the JSON package // used at the call sites; the standard marshalers can be implemented via [jsonv2]. -// See [Prefs.MarshalJSONV2], [Prefs.UnmarshalJSONV2], [Prefs.MarshalJSON], +// See [Prefs.MarshalJSONTo], [Prefs.UnmarshalJSONFrom], [Prefs.MarshalJSON], // and [Prefs.UnmarshalJSON] for an example implementation. type Prefs struct { ControlURL prefs.Item[string] `json:",omitzero"` @@ -128,34 +128,34 @@ type AppConnectorPrefs struct { Advertise prefs.Item[bool] `json:",omitzero"` } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
+// MarshalJSONTo implements [jsonv2.MarshalerTo]. // It is implemented as a performance improvement and to enable omission of // unconfigured preferences from the JSON output. See the [Prefs] doc for details. -func (p Prefs) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { +func (p Prefs) MarshalJSONTo(out *jsontext.Encoder) error { // The prefs type shadows the Prefs's method set, // causing [jsonv2] to use the default marshaler and avoiding // infinite recursion. type prefs Prefs - return jsonv2.MarshalEncode(out, (*prefs)(&p), opts) + return jsonv2.MarshalEncode(out, (*prefs)(&p)) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (p *Prefs) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (p *Prefs) UnmarshalJSONFrom(in *jsontext.Decoder) error { // The prefs type shadows the Prefs's method set, // causing [jsonv2] to use the default unmarshaler and avoiding // infinite recursion. type prefs Prefs - return jsonv2.UnmarshalDecode(in, (*prefs)(p), opts) + return jsonv2.UnmarshalDecode(in, (*prefs)(p)) } // MarshalJSON implements [json.Marshaler]. func (p Prefs) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(p) // uses MarshalJSONV2 + return jsonv2.Marshal(p) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (p *Prefs) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONFrom } type marshalAsTrueInJSON struct{} diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index ea4729366..1201054d0 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -53,32 +53,32 @@ type TestPrefs struct { Group TestPrefsGroup `json:",omitzero"` } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
-func (p TestPrefs) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (p TestPrefs) MarshalJSONTo(out *jsontext.Encoder) error { // The testPrefs type shadows the TestPrefs's method set, // causing jsonv2 to use the default marshaler and avoiding // infinite recursion. type testPrefs TestPrefs - return jsonv2.MarshalEncode(out, (*testPrefs)(&p), opts) + return jsonv2.MarshalEncode(out, (*testPrefs)(&p)) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (p *TestPrefs) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (p *TestPrefs) UnmarshalJSONFrom(in *jsontext.Decoder) error { // The testPrefs type shadows the TestPrefs's method set, // causing jsonv2 to use the default unmarshaler and avoiding // infinite recursion. type testPrefs TestPrefs - return jsonv2.UnmarshalDecode(in, (*testPrefs)(p), opts) + return jsonv2.UnmarshalDecode(in, (*testPrefs)(p)) } // MarshalJSON implements [json.Marshaler]. func (p TestPrefs) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(p) // uses MarshalJSONV2 + return jsonv2.Marshal(p) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (p *TestPrefs) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, p) // uses UnmarshalJSONFrom } // TestBundle is an example structure type that, diff --git a/types/prefs/struct_list.go b/types/prefs/struct_list.go index 872cb2326..65f11011a 100644 --- a/types/prefs/struct_list.go +++ b/types/prefs/struct_list.go @@ -169,15 +169,15 @@ func (lv StructListView[T, V]) Equal(lv2 StructListView[T, V]) bool { return lv.ж.Equal(*lv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
-func (lv StructListView[T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return lv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (lv StructListView[T, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return lv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (lv *StructListView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (lv *StructListView[T, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x StructList[T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } lv.ж = &x @@ -186,10 +186,10 @@ func (lv *StructListView[T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv // MarshalJSON implements [json.Marshaler]. func (lv StructListView[T, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(lv) // uses MarshalJSONV2 + return jsonv2.Marshal(lv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (lv *StructListView[T, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, lv) // uses UnmarshalJSONFrom } diff --git a/types/prefs/struct_map.go b/types/prefs/struct_map.go index 4d55da7a0..a081f7c74 100644 --- a/types/prefs/struct_map.go +++ b/types/prefs/struct_map.go @@ -149,15 +149,15 @@ func (mv StructMapView[K, T, V]) Equal(mv2 StructMapView[K, T, V]) bool { return mv.ж.Equal(*mv2.ж) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (mv StructMapView[K, T, V]) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return mv.ж.MarshalJSONV2(out, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (mv StructMapView[K, T, V]) MarshalJSONTo(out *jsontext.Encoder) error { + return mv.ж.MarshalJSONTo(out) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. 
-func (mv *StructMapView[K, T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (mv *StructMapView[K, T, V]) UnmarshalJSONFrom(in *jsontext.Decoder) error { var x StructMap[K, T] - if err := x.UnmarshalJSONV2(in, opts); err != nil { + if err := x.UnmarshalJSONFrom(in); err != nil { return err } mv.ж = &x @@ -166,10 +166,10 @@ func (mv *StructMapView[K, T, V]) UnmarshalJSONV2(in *jsontext.Decoder, opts jso // MarshalJSON implements [json.Marshaler]. func (mv StructMapView[K, T, V]) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(mv) // uses MarshalJSONV2 + return jsonv2.Marshal(mv) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (mv *StructMapView[K, T, V]) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, mv) // uses UnmarshalJSONFrom } diff --git a/util/syspolicy/internal/internal.go b/util/syspolicy/internal/internal.go index 8f2889625..2e1737e5b 100644 --- a/util/syspolicy/internal/internal.go +++ b/util/syspolicy/internal/internal.go @@ -56,10 +56,10 @@ func EqualJSONForTest(tb TB, j1, j2 jsontext.Value) (s1, s2 string, equal bool) return "", "", true } // Otherwise, format the values for display and return false. - if err := j1.Indent("", "\t"); err != nil { + if err := j1.Indent(); err != nil { tb.Fatal(err) } - if err := j2.Indent("", "\t"); err != nil { + if err := j2.Indent(); err != nil { tb.Fatal(err) } return j1.String(), j2.String(), false diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go index 078ef758e..b5b28edf6 100644 --- a/util/syspolicy/setting/origin.go +++ b/util/syspolicy/setting/origin.go @@ -50,22 +50,22 @@ func (s Origin) String() string { return s.Scope().String() } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. 
-func (s Origin) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &s.data, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (s Origin) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &s.data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (s *Origin) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &s.data, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (s *Origin) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &s.data) } // MarshalJSON implements [json.Marshaler]. func (s Origin) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(s) // uses MarshalJSONV2 + return jsonv2.Marshal(s) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (s *Origin) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONFrom } diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index cf46e54b7..82e5f634a 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -75,31 +75,31 @@ func (i RawItem) String() string { return fmt.Sprintf("%v%s", i.data.Value.Value, suffix) } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (i RawItem) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &i.data, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (i RawItem) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &i.data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (i *RawItem) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &i.data, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (i *RawItem) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &i.data) } // MarshalJSON implements [json.Marshaler]. func (i RawItem) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(i) // uses MarshalJSONV2 + return jsonv2.Marshal(i) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (i *RawItem) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, i) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, i) // uses UnmarshalJSONFrom } // RawValue represents a raw policy setting value read from a policy store. // It is JSON-marshallable and facilitates unmarshalling of JSON values // into corresponding policy setting types, with special handling for JSON numbers // (unmarshalled as float64) and JSON string arrays (unmarshalled as []string). -// See also [RawValue.UnmarshalJSONV2]. +// See also [RawValue.UnmarshalJSONFrom]. type RawValue struct { opt.Value[any] } @@ -114,16 +114,16 @@ func RawValueOf[T RawValueType](v T) RawValue { return RawValue{opt.ValueOf[any](v)} } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (v RawValue) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, v.Value, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (v RawValue) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, v.Value) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2] by attempting to unmarshal +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom] by attempting to unmarshal // a JSON value as one of the supported policy setting value types (bool, string, uint64, or []string), // based on the JSON value type. It fails if the JSON value is an object, if it's a JSON number that // cannot be represented as a uint64, or if a JSON array contains anything other than strings. 
-func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +func (v *RawValue) UnmarshalJSONFrom(in *jsontext.Decoder) error { var valPtr any switch k := in.PeekKind(); k { case 't', 'f': @@ -139,7 +139,7 @@ func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) er default: panic("unreachable") } - if err := jsonv2.UnmarshalDecode(in, valPtr, opts); err != nil { + if err := jsonv2.UnmarshalDecode(in, valPtr); err != nil { v.Value.Clear() return err } @@ -150,12 +150,12 @@ func (v *RawValue) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) er // MarshalJSON implements [json.Marshaler]. func (v RawValue) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(v) // uses MarshalJSONV2 + return jsonv2.Marshal(v) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (v *RawValue) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, v) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, v) // uses UnmarshalJSONFrom } // RawValues is a map of keyed setting values that can be read from a JSON. diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 0af2bae0f..38642f7cc 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -147,23 +147,23 @@ type snapshotJSON struct { Settings map[Key]RawItem `json:",omitempty"` } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (s *Snapshot) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} if s != nil { data.Summary = s.summary data.Settings = s.m } - return jsonv2.MarshalEncode(out, data, opts) + return jsonv2.MarshalEncode(out, data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. 
-func (s *Snapshot) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. +func (s *Snapshot) UnmarshalJSONFrom(in *jsontext.Decoder) error { if s == nil { return errors.New("s must not be nil") } data := &snapshotJSON{} - if err := jsonv2.UnmarshalDecode(in, data, opts); err != nil { + if err := jsonv2.UnmarshalDecode(in, data); err != nil { return err } *s = Snapshot{m: data.Settings, sig: deephash.Hash(&data.Settings), summary: data.Summary} @@ -172,12 +172,12 @@ func (s *Snapshot) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) er // MarshalJSON implements [json.Marshaler]. func (s *Snapshot) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(s) // uses MarshalJSONV2 + return jsonv2.Marshal(s) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (s *Snapshot) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONFrom } // MergeSnapshots returns a [Snapshot] that contains all [RawItem]s diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go index 5ff20e0aa..d7c139a87 100644 --- a/util/syspolicy/setting/summary.go +++ b/util/syspolicy/setting/summary.go @@ -54,24 +54,24 @@ func (s Summary) String() string { return s.data.Scope.String() } -// MarshalJSONV2 implements [jsonv2.MarshalerV2]. -func (s Summary) MarshalJSONV2(out *jsontext.Encoder, opts jsonv2.Options) error { - return jsonv2.MarshalEncode(out, &s.data, opts) +// MarshalJSONTo implements [jsonv2.MarshalerTo]. +func (s Summary) MarshalJSONTo(out *jsontext.Encoder) error { + return jsonv2.MarshalEncode(out, &s.data) } -// UnmarshalJSONV2 implements [jsonv2.UnmarshalerV2]. -func (s *Summary) UnmarshalJSONV2(in *jsontext.Decoder, opts jsonv2.Options) error { - return jsonv2.UnmarshalDecode(in, &s.data, opts) +// UnmarshalJSONFrom implements [jsonv2.UnmarshalerFrom]. 
+func (s *Summary) UnmarshalJSONFrom(in *jsontext.Decoder) error { + return jsonv2.UnmarshalDecode(in, &s.data) } // MarshalJSON implements [json.Marshaler]. func (s Summary) MarshalJSON() ([]byte, error) { - return jsonv2.Marshal(s) // uses MarshalJSONV2 + return jsonv2.Marshal(s) // uses MarshalJSONTo } // UnmarshalJSON implements [json.Unmarshaler]. func (s *Summary) UnmarshalJSON(b []byte) error { - return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONV2 + return jsonv2.Unmarshal(b, s) // uses UnmarshalJSONFrom } // SummaryOption is an option that configures [Summary] From f5522e62d1dde2ea966f2454df248a8ea2d43676 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 27 Feb 2025 11:58:45 -0800 Subject: [PATCH 06/87] client/web: fix CSRF handler order in web UI (#15143) Fix the order of the CSRF handlers (HTTP plaintext context setting, _then_ enforcement) in the construction of the web UI server. This resolves false-positive "invalid Origin" 403 exceptions when attempting to update settings in the web UI. Add unit test to exercise the CSRF protection failure and success cases for our web UI configuration. Updates #14822 Updates #14872 Signed-off-by: Patrick O'Doherty --- client/web/web.go | 65 ++++++++++++++++++--------------- client/web/web_test.go | 82 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+), 29 deletions(-) diff --git a/client/web/web.go b/client/web/web.go index 6203b4c18..e9810ccd0 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -203,35 +203,9 @@ func NewServer(opts ServerOpts) (s *Server, err error) { } s.assetsHandler, s.assetsCleanup = assetsHandler(s.devMode) - var metric string // clientmetric to report on startup - - // Create handler for "/api" requests with CSRF protection. - // We don't require secure cookies, since the web client is regularly used - // on network appliances that are served on local non-https URLs. 
- // The client is secured by limiting the interface it listens on, - // or by authenticating requests before they reach the web client. - csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false)) - - // signal to the CSRF middleware that the request is being served over - // plaintext HTTP to skip TLS-only header checks. - withSetPlaintext := func(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - r = csrf.PlaintextHTTPRequest(r) - h.ServeHTTP(w, r) - }) - } - - switch s.mode { - case LoginServerMode: - s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI))) - metric = "web_login_client_initialization" - case ReadOnlyServerMode: - s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveLoginAPI))) - metric = "web_readonly_client_initialization" - case ManageServerMode: - s.apiHandler = csrfProtect(withSetPlaintext(http.HandlerFunc(s.serveAPI))) - metric = "web_client_initialization" - } + var metric string + s.apiHandler, metric = s.modeAPIHandler(s.mode) + s.apiHandler = s.withCSRF(s.apiHandler) // Don't block startup on reporting metric. // Report in separate go routine with 5 second timeout. @@ -244,6 +218,39 @@ func NewServer(opts ServerOpts) (s *Server, err error) { return s, nil } +func (s *Server) withCSRF(h http.Handler) http.Handler { + csrfProtect := csrf.Protect(s.csrfKey(), csrf.Secure(false)) + + // ref https://github.com/tailscale/tailscale/pull/14822 + // signal to the CSRF middleware that the request is being served over + // plaintext HTTP to skip TLS-only header checks. 
+ withSetPlaintext := func(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r = csrf.PlaintextHTTPRequest(r) + h.ServeHTTP(w, r) + }) + } + + // NB: the order of the withSetPlaintext and csrfProtect calls is important + // to ensure that we signal to the CSRF middleware that the request is being + // served over plaintext HTTP and not over TLS as it presumes by default. + return withSetPlaintext(csrfProtect(h)) +} + +func (s *Server) modeAPIHandler(mode ServerMode) (http.Handler, string) { + switch mode { + case LoginServerMode: + return http.HandlerFunc(s.serveLoginAPI), "web_login_client_initialization" + case ReadOnlyServerMode: + return http.HandlerFunc(s.serveLoginAPI), "web_readonly_client_initialization" + case ManageServerMode: + return http.HandlerFunc(s.serveAPI), "web_client_initialization" + default: // invalid mode + log.Fatalf("invalid mode: %v", mode) + } + return nil, "" +} + func (s *Server) Shutdown() { s.logf("web.Server: shutting down") if s.assetsCleanup != nil { diff --git a/client/web/web_test.go b/client/web/web_test.go index b9242f6ac..291356260 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "net/http" + "net/http/cookiejar" "net/http/httptest" "net/netip" "net/url" @@ -20,6 +21,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/gorilla/csrf" "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" @@ -1477,3 +1479,83 @@ func mockWaitAuthURL(_ context.Context, id string, src tailcfg.NodeID) (*tailcfg return nil, errors.New("unknown id") } } + +func TestCSRFProtect(t *testing.T) { + s := &Server{} + + mux := http.NewServeMux() + mux.HandleFunc("GET /test/csrf-token", func(w http.ResponseWriter, r *http.Request) { + token := csrf.Token(r) + _, err := io.WriteString(w, token) + if err != nil { + t.Fatal(err) + } + }) + mux.HandleFunc("POST /test/csrf-protected", func(w 
http.ResponseWriter, r *http.Request) { + _, err := io.WriteString(w, "ok") + if err != nil { + t.Fatal(err) + } + }) + h := s.withCSRF(mux) + ser := httptest.NewServer(h) + defer ser.Close() + + jar, err := cookiejar.New(nil) + if err != nil { + t.Fatalf("unable to construct cookie jar: %v", err) + } + + client := ser.Client() + client.Jar = jar + + // make GET request to populate cookie jar + resp, err := client.Get(ser.URL + "/test/csrf-token") + if err != nil { + t.Fatalf("unable to make request: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status: %v", resp.Status) + } + tokenBytes, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unable to read body: %v", err) + } + + csrfToken := strings.TrimSpace(string(tokenBytes)) + if csrfToken == "" { + t.Fatal("empty csrf token") + } + + // make a POST request without the CSRF header; ensure it fails + resp, err = client.Post(ser.URL+"/test/csrf-protected", "text/plain", nil) + if err != nil { + t.Fatalf("unable to make request: %v", err) + } + if resp.StatusCode != http.StatusForbidden { + t.Fatalf("unexpected status: %v", resp.Status) + } + + // make a POST request with the CSRF header; ensure it succeeds + req, err := http.NewRequest("POST", ser.URL+"/test/csrf-protected", nil) + if err != nil { + t.Fatalf("error building request: %v", err) + } + req.Header.Set("X-CSRF-Token", csrfToken) + resp, err = client.Do(req) + if err != nil { + t.Fatalf("unable to make request: %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("unexpected status: %v", resp.Status) + } + defer resp.Body.Close() + out, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("unable to read body: %v", err) + } + if string(out) != "ok" { + t.Fatalf("unexpected body: %q", out) + } +} From 3d28aa19cbf70a0b0e72d2ce37e83bd7e73a346c Mon Sep 17 00:00:00 2001 From: Joe Tsai Date: Thu, 27 Feb 2025 12:33:31 -0800 Subject: [PATCH 07/87] all: statically enforce json/v2 
interface satisfaction (#15154) The json/v2 prototype is still in flux and the API can/will change. Statically enforce that types implementing the v2 methods satisfy the correct interface so that changes to the signature can be statically detected by the compiler. Updates tailscale/corp#791 Signed-off-by: Joe Tsai --- types/opt/value_test.go | 5 +++++ types/prefs/list.go | 5 +++++ types/prefs/prefs.go | 5 +++++ types/prefs/prefs_example/prefs_types.go | 5 +++++ types/prefs/prefs_test.go | 19 +++++++++++++++++++ util/syspolicy/setting/origin.go | 5 +++++ util/syspolicy/setting/raw_item.go | 10 ++++++++++ util/syspolicy/setting/snapshot.go | 5 +++++ util/syspolicy/setting/summary.go | 5 +++++ 9 files changed, 64 insertions(+) diff --git a/types/opt/value_test.go b/types/opt/value_test.go index dbd8b255f..890f9a579 100644 --- a/types/opt/value_test.go +++ b/types/opt/value_test.go @@ -13,6 +13,11 @@ import ( "tailscale.com/util/must" ) +var ( + _ jsonv2.MarshalerTo = (*Value[bool])(nil) + _ jsonv2.UnmarshalerFrom = (*Value[bool])(nil) +) + type testStruct struct { Int int `json:",omitempty,omitzero"` Str string `json:",omitempty"` diff --git a/types/prefs/list.go b/types/prefs/list.go index e9c1a1f33..7db473887 100644 --- a/types/prefs/list.go +++ b/types/prefs/list.go @@ -157,6 +157,11 @@ func (lv ListView[T]) Equal(lv2 ListView[T]) bool { return lv.ж.Equal(*lv2.ж) } +var ( + _ jsonv2.MarshalerTo = (*ListView[bool])(nil) + _ jsonv2.UnmarshalerFrom = (*ListView[bool])(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. 
func (lv ListView[T]) MarshalJSONTo(out *jsontext.Encoder) error { return lv.ж.MarshalJSONTo(out) diff --git a/types/prefs/prefs.go b/types/prefs/prefs.go index 52cb464b6..a6caf1283 100644 --- a/types/prefs/prefs.go +++ b/types/prefs/prefs.go @@ -158,6 +158,11 @@ func (p *preference[T]) SetReadOnly(readonly bool) { p.s.Metadata.ReadOnly = readonly } +var ( + _ jsonv2.MarshalerTo = (*preference[struct{}])(nil) + _ jsonv2.UnmarshalerFrom = (*preference[struct{}])(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (p preference[T]) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &p.s) diff --git a/types/prefs/prefs_example/prefs_types.go b/types/prefs/prefs_example/prefs_types.go index f88c29f94..c35f1f62f 100644 --- a/types/prefs/prefs_example/prefs_types.go +++ b/types/prefs/prefs_example/prefs_types.go @@ -128,6 +128,11 @@ type AppConnectorPrefs struct { Advertise prefs.Item[bool] `json:",omitzero"` } +var ( + _ jsonv2.MarshalerTo = (*Prefs)(nil) + _ jsonv2.UnmarshalerFrom = (*Prefs)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. // It is implemented as a performance improvement and to enable omission of // unconfigured preferences from the JSON output. See the [Prefs] doc for details. 
diff --git a/types/prefs/prefs_test.go b/types/prefs/prefs_test.go index 1201054d0..d6af745bf 100644 --- a/types/prefs/prefs_test.go +++ b/types/prefs/prefs_test.go @@ -19,6 +19,20 @@ import ( //go:generate go run tailscale.com/cmd/viewer --tags=test --type=TestPrefs,TestBundle,TestValueStruct,TestGenericStruct,TestPrefsGroup +var ( + _ jsonv2.MarshalerTo = (*ItemView[*TestBundle, TestBundleView])(nil) + _ jsonv2.UnmarshalerFrom = (*ItemView[*TestBundle, TestBundleView])(nil) + + _ jsonv2.MarshalerTo = (*MapView[string, string])(nil) + _ jsonv2.UnmarshalerFrom = (*MapView[string, string])(nil) + + _ jsonv2.MarshalerTo = (*StructListView[*TestBundle, TestBundleView])(nil) + _ jsonv2.UnmarshalerFrom = (*StructListView[*TestBundle, TestBundleView])(nil) + + _ jsonv2.MarshalerTo = (*StructMapView[string, *TestBundle, TestBundleView])(nil) + _ jsonv2.UnmarshalerFrom = (*StructMapView[string, *TestBundle, TestBundleView])(nil) +) + type TestPrefs struct { Int32Item Item[int32] `json:",omitzero"` UInt64Item Item[uint64] `json:",omitzero"` @@ -53,6 +67,11 @@ type TestPrefs struct { Group TestPrefsGroup `json:",omitzero"` } +var ( + _ jsonv2.MarshalerTo = (*TestPrefs)(nil) + _ jsonv2.UnmarshalerFrom = (*TestPrefs)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (p TestPrefs) MarshalJSONTo(out *jsontext.Encoder) error { // The testPrefs type shadows the TestPrefs's method set, diff --git a/util/syspolicy/setting/origin.go b/util/syspolicy/setting/origin.go index b5b28edf6..4c7cc7025 100644 --- a/util/syspolicy/setting/origin.go +++ b/util/syspolicy/setting/origin.go @@ -50,6 +50,11 @@ func (s Origin) String() string { return s.Scope().String() } +var ( + _ jsonv2.MarshalerTo = (*Origin)(nil) + _ jsonv2.UnmarshalerFrom = (*Origin)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. 
func (s Origin) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &s.data) diff --git a/util/syspolicy/setting/raw_item.go b/util/syspolicy/setting/raw_item.go index 82e5f634a..9a96073b0 100644 --- a/util/syspolicy/setting/raw_item.go +++ b/util/syspolicy/setting/raw_item.go @@ -75,6 +75,11 @@ func (i RawItem) String() string { return fmt.Sprintf("%v%s", i.data.Value.Value, suffix) } +var ( + _ jsonv2.MarshalerTo = (*RawItem)(nil) + _ jsonv2.UnmarshalerFrom = (*RawItem)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (i RawItem) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &i.data) @@ -114,6 +119,11 @@ func RawValueOf[T RawValueType](v T) RawValue { return RawValue{opt.ValueOf[any](v)} } +var ( + _ jsonv2.MarshalerTo = (*RawValue)(nil) + _ jsonv2.UnmarshalerFrom = (*RawValue)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (v RawValue) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, v.Value) diff --git a/util/syspolicy/setting/snapshot.go b/util/syspolicy/setting/snapshot.go index 38642f7cc..087325a04 100644 --- a/util/syspolicy/setting/snapshot.go +++ b/util/syspolicy/setting/snapshot.go @@ -147,6 +147,11 @@ type snapshotJSON struct { Settings map[Key]RawItem `json:",omitempty"` } +var ( + _ jsonv2.MarshalerTo = (*Snapshot)(nil) + _ jsonv2.UnmarshalerFrom = (*Snapshot)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. func (s *Snapshot) MarshalJSONTo(out *jsontext.Encoder) error { data := &snapshotJSON{} diff --git a/util/syspolicy/setting/summary.go b/util/syspolicy/setting/summary.go index d7c139a87..9864822f7 100644 --- a/util/syspolicy/setting/summary.go +++ b/util/syspolicy/setting/summary.go @@ -54,6 +54,11 @@ func (s Summary) String() string { return s.data.Scope.String() } +var ( + _ jsonv2.MarshalerTo = (*Summary)(nil) + _ jsonv2.UnmarshalerFrom = (*Summary)(nil) +) + // MarshalJSONTo implements [jsonv2.MarshalerTo]. 
func (s Summary) MarshalJSONTo(out *jsontext.Encoder) error { return jsonv2.MarshalEncode(out, &s.data) From b85d18d14e9898261af00de60ebf069bc17a1a0b Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 27 Feb 2025 14:41:05 -0800 Subject: [PATCH 08/87] ipn/{ipnlocal,store},kube/kubeclient: store TLS cert and key pair to a Secret in a single operation. (#15147) To avoid duplicate issuances/slowness while the state Secret contains a mismatched cert and key. Updates tailscale/tailscale#15134 Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- ipn/ipnlocal/cert.go | 39 ++++-- ipn/ipnlocal/cert_test.go | 8 +- ipn/store/kubestore/store_kube.go | 76 ++++++---- ipn/store/kubestore/store_kube_test.go | 183 +++++++++++++++++++++++++ kube/kubeclient/fake_client.go | 15 +- 5 files changed, 278 insertions(+), 43 deletions(-) create mode 100644 ipn/store/kubestore/store_kube_test.go diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index cfa4fe1ba..d360ed79c 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -250,15 +250,13 @@ type certStore interface { // for now. If they're expired, it returns errCertExpired. // If they don't exist, it returns ipn.ErrStateNotExist. Read(domain string, now time.Time) (*TLSCertKeyPair, error) - // WriteCert writes the cert for domain. - WriteCert(domain string, cert []byte) error - // WriteKey writes the key for domain. - WriteKey(domain string, key []byte) error // ACMEKey returns the value previously stored via WriteACMEKey. // It is a PEM encoded ECDSA key. ACMEKey() ([]byte, error) // WriteACMEKey stores the provided PEM encoded ECDSA key. WriteACMEKey([]byte) error + // WriteTLSCertAndKey writes the cert and key for domain. 
+ WriteTLSCertAndKey(domain string, cert, key []byte) error } var errCertExpired = errors.New("cert expired") @@ -344,6 +342,13 @@ func (f certFileStore) WriteKey(domain string, key []byte) error { return atomicfile.WriteFile(keyFile(f.dir, domain), key, 0600) } +func (f certFileStore) WriteTLSCertAndKey(domain string, cert, key []byte) error { + if err := f.WriteKey(domain, key); err != nil { + return err + } + return f.WriteCert(domain, cert) +} + // certStateStore implements certStore by storing the cert & key files in an ipn.StateStore. type certStateStore struct { ipn.StateStore @@ -384,6 +389,27 @@ func (s certStateStore) WriteACMEKey(key []byte) error { return ipn.WriteState(s.StateStore, ipn.StateKey(acmePEMName), key) } +// TLSCertKeyWriter is an interface implemented by state stores that can write the TLS +// cert and key in a single atomic operation. Currently this is only implemented +// by the kubestore.StoreKube. +type TLSCertKeyWriter interface { + WriteTLSCertAndKey(domain string, cert, key []byte) error +} + +// WriteTLSCertAndKey writes the TLS cert and key for domain to the current +// LocalBackend's StateStore. +func (s certStateStore) WriteTLSCertAndKey(domain string, cert, key []byte) error { + // If we're using a store that supports atomic writes, use that. + if aw, ok := s.StateStore.(TLSCertKeyWriter); ok { + return aw.WriteTLSCertAndKey(domain, cert, key) + } + // Otherwise fall back to separate writes for cert and key. + if err := s.WriteKey(domain, key); err != nil { + return err + } + return s.WriteCert(domain, cert) +} + // TLSCertKeyPair is a TLS public and private key, and whether they were obtained // from cache or freshly obtained. 
type TLSCertKeyPair struct { @@ -546,9 +572,6 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger if err := encodeECDSAKey(&privPEM, certPrivKey); err != nil { return nil, err } - if err := cs.WriteKey(domain, privPEM.Bytes()); err != nil { - return nil, err - } csr, err := certRequest(certPrivKey, domain, nil) if err != nil { @@ -570,7 +593,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger return nil, err } } - if err := cs.WriteCert(domain, certPEM.Bytes()); err != nil { + if err := cs.WriteTLSCertAndKey(domain, certPEM.Bytes(), privPEM.Bytes()); err != nil { return nil, err } b.domainRenewed(domain) diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index 21741ca95..868808cd6 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -86,13 +86,9 @@ func TestCertStoreRoundTrip(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - if err := test.store.WriteCert(testDomain, testCert); err != nil { - t.Fatalf("WriteCert: unexpected error: %v", err) + if err := test.store.WriteTLSCertAndKey(testDomain, testCert, testKey); err != nil { + t.Fatalf("WriteTLSCertAndKey: unexpected error: %v", err) } - if err := test.store.WriteKey(testDomain, testKey); err != nil { - t.Fatalf("WriteKey: unexpected error: %v", err) - } - kp, err := test.store.Read(testDomain, testNow) if err != nil { t.Fatalf("Read: unexpected error: %v", err) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 462e6d434..b4e14c6d3 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -18,6 +18,7 @@ import ( "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" "tailscale.com/types/logger" + "tailscale.com/util/mak" ) const ( @@ -83,10 +84,22 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { // WriteState implements the StateStore interface. 
func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { + return s.updateStateSecret(map[string][]byte{string(id): bs}) +} + +// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields of a Tailscale Kubernetes node's state +// Secret. +func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) error { + return s.updateStateSecret(map[string][]byte{domain + ".crt": cert, domain + ".key": key}) +} + +func (s *Store) updateStateSecret(data map[string][]byte) (err error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer func() { if err == nil { - s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) + for id, bs := range data { + s.memory.WriteState(ipn.StateKey(id), bs) + } } if err != nil { if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { @@ -99,9 +112,9 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { } cancel() }() - secret, err := s.client.GetSecret(ctx, s.secretName) if err != nil { + // If the Secret does not exist, create it with the required data. if kubeclient.IsNotFoundErr(err) { return s.client.CreateSecret(ctx, &kubeapi.Secret{ TypeMeta: kubeapi.TypeMeta{ @@ -111,40 +124,53 @@ func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { ObjectMeta: kubeapi.ObjectMeta{ Name: s.secretName, }, - Data: map[string][]byte{ - sanitizeKey(id): bs, - }, + Data: func(m map[string][]byte) map[string][]byte { + d := make(map[string][]byte, len(m)) + for key, val := range m { + d[sanitizeKey(key)] = val + } + return d + }(data), }) } return err } if s.canPatch { - if len(secret.Data) == 0 { // if user has pre-created a blank Secret - m := []kubeclient.JSONPatch{ + var m []kubeclient.JSONPatch + // If the user has pre-created a Secret with no data, we need to ensure the top level /data field. 
+ if len(secret.Data) == 0 { + m = []kubeclient.JSONPatch{ { - Op: "add", - Path: "/data", - Value: map[string][]byte{sanitizeKey(id): bs}, + Op: "add", + Path: "/data", + Value: func(m map[string][]byte) map[string][]byte { + d := make(map[string][]byte, len(m)) + for key, val := range m { + d[sanitizeKey(key)] = val + } + return d + }(data), }, } - if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { - return fmt.Errorf("error patching Secret %s with a /data field: %v", s.secretName, err) + // If the Secret has data, patch it with the new data. + } else { + for key, val := range data { + m = append(m, kubeclient.JSONPatch{ + Op: "add", + Path: "/data/" + sanitizeKey(key), + Value: val, + }) } - return nil - } - m := []kubeclient.JSONPatch{ - { - Op: "add", - Path: "/data/" + sanitizeKey(id), - Value: bs, - }, } if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { - return fmt.Errorf("error patching Secret %s with /data/%s field: %v", s.secretName, sanitizeKey(id), err) + return fmt.Errorf("error patching Secret %s: %w", s.secretName, err) } return nil } - secret.Data[sanitizeKey(id)] = bs + // No patch permissions, use UPDATE instead. + for key, val := range data { + mak.Set(&secret.Data, sanitizeKey(key), val) + } if err := s.client.UpdateSecret(ctx, secret); err != nil { return err } @@ -172,9 +198,9 @@ func (s *Store) loadState() (err error) { return nil } -func sanitizeKey(k ipn.StateKey) string { - // The only valid characters in a Kubernetes secret key are alphanumeric, -, - // _, and . +// sanitizeKey converts any value that can be converted to a string into a valid Kubernetes secret key. +// Valid characters are alphanumeric, -, _, and . +func sanitizeKey[T ~string](k T) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' 
{ return r diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go new file mode 100644 index 000000000..f3c5ac9fb --- /dev/null +++ b/ipn/store/kubestore/store_kube_test.go @@ -0,0 +1,183 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package kubestore + +import ( + "context" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "tailscale.com/ipn" + "tailscale.com/ipn/store/mem" + "tailscale.com/kube/kubeapi" + "tailscale.com/kube/kubeclient" +) + +func TestUpdateStateSecret(t *testing.T) { + tests := []struct { + name string + initial map[string][]byte + updates map[string][]byte + wantData map[string][]byte + allowPatch bool + }{ + { + name: "basic_update", + initial: map[string][]byte{ + "existing": []byte("old"), + }, + updates: map[string][]byte{ + "foo": []byte("bar"), + }, + wantData: map[string][]byte{ + "existing": []byte("old"), + "foo": []byte("bar"), + }, + allowPatch: true, + }, + { + name: "update_existing", + initial: map[string][]byte{ + "foo": []byte("old"), + }, + updates: map[string][]byte{ + "foo": []byte("new"), + }, + wantData: map[string][]byte{ + "foo": []byte("new"), + }, + allowPatch: true, + }, + { + name: "multiple_updates", + initial: map[string][]byte{ + "keep": []byte("keep"), + }, + updates: map[string][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + wantData: map[string][]byte{ + "keep": []byte("keep"), + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + allowPatch: true, + }, + { + name: "create_new_secret", + updates: map[string][]byte{ + "foo": []byte("bar"), + }, + wantData: map[string][]byte{ + "foo": []byte("bar"), + }, + allowPatch: true, + }, + { + name: "patch_denied", + initial: map[string][]byte{ + "foo": []byte("old"), + }, + updates: map[string][]byte{ + "foo": []byte("new"), + }, + wantData: map[string][]byte{ + "foo": []byte("new"), + }, + allowPatch: false, + }, + { + name: "sanitize_keys", + initial: 
map[string][]byte{ + "clean-key": []byte("old"), + }, + updates: map[string][]byte{ + "dirty@key": []byte("new"), + "also/bad": []byte("value"), + "good.key": []byte("keep"), + }, + wantData: map[string][]byte{ + "clean-key": []byte("old"), + "dirty_key": []byte("new"), + "also_bad": []byte("value"), + "good.key": []byte("keep"), + }, + allowPatch: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + secret := tt.initial // track current state + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if secret == nil { + return nil, &kubeapi.Status{Code: 404} + } + return &kubeapi.Secret{Data: secret}, nil + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return tt.allowPatch, true, nil + }, + CreateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + secret = s.Data + return nil + }, + UpdateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + secret = s.Data + return nil + }, + JSONPatchResourceImpl: func(ctx context.Context, name, resourceType string, patches []kubeclient.JSONPatch) error { + if !tt.allowPatch { + return &kubeapi.Status{Reason: "Forbidden"} + } + if secret == nil { + secret = make(map[string][]byte) + } + for _, p := range patches { + if p.Op == "add" && p.Path == "/data" { + secret = p.Value.(map[string][]byte) + } else if p.Op == "add" && strings.HasPrefix(p.Path, "/data/") { + key := strings.TrimPrefix(p.Path, "/data/") + secret[key] = p.Value.([]byte) + } + } + return nil + }, + } + + s := &Store{ + client: client, + canPatch: tt.allowPatch, + secretName: "test-secret", + memory: mem.Store{}, + } + + err := s.updateStateSecret(tt.updates) + if err != nil { + t.Errorf("updateStateSecret() error = %v", err) + return + } + + // Verify secret data + if diff := cmp.Diff(secret, tt.wantData); diff != "" { + t.Errorf("secret data mismatch (-got +want):\n%s", diff) + } + + // 
Verify memory store was updated + for k, v := range tt.updates { + got, err := s.memory.ReadState(ipn.StateKey(k)) + if err != nil { + t.Errorf("reading from memory store: %v", err) + continue + } + if !cmp.Equal(got, v) { + t.Errorf("memory store key %q = %v, want %v", k, got, v) + } + } + }) + } +} diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index 5716ca31b..aea786ea0 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -15,6 +15,9 @@ var _ Client = &FakeClient{} type FakeClient struct { GetSecretImpl func(context.Context, string) (*kubeapi.Secret, error) CheckSecretPermissionsImpl func(ctx context.Context, name string) (bool, bool, error) + CreateSecretImpl func(context.Context, *kubeapi.Secret) error + UpdateSecretImpl func(context.Context, *kubeapi.Secret) error + JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { @@ -33,8 +36,12 @@ func (fc *FakeClient) Event(context.Context, string, string, string) error { return nil } -func (fc *FakeClient) JSONPatchResource(context.Context, string, string, []JSONPatch) error { - return nil +func (fc *FakeClient) JSONPatchResource(ctx context.Context, resource, name string, patches []JSONPatch) error { + return fc.JSONPatchResourceImpl(ctx, resource, name, patches) +} +func (fc *FakeClient) UpdateSecret(ctx context.Context, secret *kubeapi.Secret) error { + return fc.UpdateSecretImpl(ctx, secret) +} +func (fc *FakeClient) CreateSecret(ctx context.Context, secret *kubeapi.Secret) error { + return fc.CreateSecretImpl(ctx, secret) } -func (fc *FakeClient) UpdateSecret(context.Context, *kubeapi.Secret) error { return nil } -func (fc *FakeClient) CreateSecret(context.Context, *kubeapi.Secret) error { return nil } From 6df0aa58bbddedf6c6f0373f9dd2eb0693e01fd8 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 27 Feb 2025 15:05:04 
-0800 Subject: [PATCH 09/87] cmd/containerboot: fix nil pointer exception (#15090) Updates tailscale/tailscale#15081 Signed-off-by: Irbe Krumina --- cmd/containerboot/serve.go | 2 ++ cmd/containerboot/tailscaled.go | 13 +++++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index fbfaba64a..4ea5a9c46 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -35,6 +35,8 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan var tickChan <-chan time.Time var eventChan <-chan fsnotify.Event if w, err := fsnotify.NewWatcher(); err != nil { + // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. + // See https://github.com/tailscale/tailscale/issues/15081 log.Printf("serve proxy: failed to create fsnotify watcher, timer-only mode: %v", err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index e73a7e94d..01ee96d3a 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -173,11 +173,14 @@ func tailscaleSet(ctx context.Context, cfg *settings) error { func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Client, errCh chan<- error) { var ( tickChan <-chan time.Time + eventChan <-chan fsnotify.Event + errChan <-chan error tailscaledCfgDir = filepath.Dir(path) prevTailscaledCfg []byte ) - w, err := fsnotify.NewWatcher() - if err != nil { + if w, err := fsnotify.NewWatcher(); err != nil { + // Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor. 
+ // See https://github.com/tailscale/tailscale/issues/15081 log.Printf("tailscaled config watch: failed to create fsnotify watcher, timer-only mode: %v", err) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() @@ -188,6 +191,8 @@ func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Cl errCh <- fmt.Errorf("failed to add fsnotify watch: %w", err) return } + eventChan = w.Events + errChan = w.Errors } b, err := os.ReadFile(path) if err != nil { @@ -205,11 +210,11 @@ func watchTailscaledConfigChanges(ctx context.Context, path string, lc *local.Cl select { case <-ctx.Done(): return - case err := <-w.Errors: + case err := <-errChan: errCh <- fmt.Errorf("watcher error: %w", err) return case <-tickChan: - case event := <-w.Events: + case event := <-eventChan: if event.Name != toWatch { continue } From 90273a7f70a16b4f8de0ac0f70ccc39ad4e1c5ff Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Thu, 27 Feb 2025 18:55:46 -0500 Subject: [PATCH 10/87] safesocket: return an error for LocalTCPPortAndToken for tailscaled (#15144) fixes tailscale/corp#26806 Fixes a regression where LocalTCPPortAndToken needs to error out early if we're not running as sandboxed macos so that we attempt to connect using the normal unix machinery. 
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 16 +++++++++++----- safesocket/safesocket_darwin_test.go | 3 +++ 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index fbcd7aaa6..f6e46bc50 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -37,14 +37,16 @@ type safesocketDarwin struct { sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file sharedDir string // shared directory for location of sameuserproof file - checkConn bool // Check macsys safesocket port before returning it - isMacSysExt func() bool // For testing only to force macsys + checkConn bool // Check macsys safesocket port before returning it + isMacSysExt func() bool // For testing only to force macsys + isSandboxedMacos func() bool // For testing only to force macOS sandbox } var ssd = safesocketDarwin{ - isMacSysExt: version.IsMacSysExt, - checkConn: true, - sharedDir: "/Library/Tailscale", + isMacSysExt: version.IsMacSysExt, + isSandboxedMacos: version.IsSandboxedMacOS, + checkConn: true, + sharedDir: "/Library/Tailscale", } // There are three ways a Darwin binary can be run: as the Mac App Store (macOS) @@ -66,6 +68,10 @@ func localTCPPortAndTokenDarwin() (port int, token string, err error) { ssd.mu.Lock() defer ssd.mu.Unlock() + if !ssd.isSandboxedMacos() { + return 0, "", ErrNoTokenOnOS + } + if ssd.port != 0 && ssd.token != "" { return ssd.port, ssd.token, nil } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index 80f0dcddd..465ac0b68 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -17,6 +17,7 @@ import ( func TestSetCredentials(t *testing.T) { wantPort := 123 wantToken := "token" + tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) SetCredentials(wantToken, wantPort) gotPort, gotToken, err := LocalTCPPortAndToken() @@ -37,6 
+38,8 @@ func TestSetCredentials(t *testing.T) { // returns a listener and a non-zero port and non-empty token. func TestInitListenerDarwin(t *testing.T) { temp := t.TempDir() + tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) + ln, err := InitListenerDarwin(temp) if err != nil || ln == nil { t.Fatalf("InitListenerDarwin failed: %v", err) From 7180812f47c8ebdee2a9837671b7a4b4d376a3f8 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Thu, 27 Feb 2025 13:40:43 -0800 Subject: [PATCH 11/87] licenses: add README Add description of the license reports in this directory and brief instructions for reviewers. I recently needed to convert these to CSV, so I also wanted a place to stash that regex so I didn't lose it. Updates tailscale/corp#5780 Signed-off-by: Will Norris --- licenses/README.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 licenses/README.md diff --git a/licenses/README.md b/licenses/README.md new file mode 100644 index 000000000..46fe8b77f --- /dev/null +++ b/licenses/README.md @@ -0,0 +1,35 @@ +# Licenses + +This directory contains a list of dependencies, and their licenses, that are included in the Tailscale clients. +These lists are generated using the [go-licenses] tool to analyze all Go packages in the Tailscale binaries, +as well as a set of custom output templates that includes any additional non-Go dependencies. +For example, the clients for macOS and iOS include some additional Swift libraries. + +These lists are updated roughly every week, so it is possible to see the dependencies in a given release by looking at the release tag. +For example, the dependencies for the 1.80.0 release of the macOS client can be seen at +<https://github.com/tailscale/tailscale/blob/v1.80.0/licenses/apple.md>. + +[go-licenses]: https://github.com/google/go-licenses + +## Other formats + +The go-licenses tool can output other formats like CSV, but that wouldn't include the non-Go dependencies. 
+We can generate a CSV file if that's really needed by running a regex over the markdown files: + +```sh +cat apple.md | grep "^ -" | sed -E "s/- \[(.*)\]\(.*?\) \(\[(.*)\]\((.*)\)\)/\1,\2,\3/" +``` + +## Reviewer instructions + +The majority of changes in this directory are from updating dependency versions. +In that case, only the URL for the license file will change to reflect the new version. +Occasionally, a dependency is added or removed, or the import path is changed. + +New dependencies require the closest review to ensure the license is acceptable. +Because we generate the license reports **after** dependencies are changed, +the new dependency would have already gone through one review when it was initially added. +This is just a secondary review to double-check the license. If in doubt, ask legal. + +Always do a normal GitHub code review on the license PR with a brief summary of what changed. +For example, see #13936 or #14064. Then approve and merge the PR. From 2791b5d5cc7f377da1c0a60e193e8cdaf37cd8b5 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 28 Feb 2025 01:28:08 -0800 Subject: [PATCH 12/87] go.{mod,sum}: bump mkctr (#15161) Updates tailscale/tailscale#15159 Signed-off-by: Irbe Krumina --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e6f3141a0..106538e94 100644 --- a/go.mod +++ b/go.mod @@ -77,7 +77,7 @@ require ( github.com/tailscale/golang-x-crypto v0.0.0-20250218230618-9a281fd8faca github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a - github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 + github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc github.com/tailscale/setec v0.0.0-20250205144240-8898a29c3fbb diff --git a/go.sum b/go.sum index 
0c8704674..efbf8ae2b 100644 --- a/go.sum +++ b/go.sum @@ -906,8 +906,8 @@ github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05 h1:4chzWmimtJPx github.com/tailscale/goupnp v1.0.1-0.20210804011211-c64d0f06ea05/go.mod h1:PdCqy9JzfWMJf1H5UJW2ip33/d4YkoKN0r67yKH1mG8= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29XwJucQo73FrleVK6t4kYz4NVhp34Yw= github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= -github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6 h1:9SuADtKJAGQkIpnpg5znEJ86QaxacN25pHkiEXTDjzg= -github.com/tailscale/mkctr v0.0.0-20250110151924-54977352e4a6/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= +github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830 h1:SwZ72kr1oRzzSPA5PYB4hzPh22UI0nm0dapn3bHaUPs= +github.com/tailscale/mkctr v0.0.0-20250228050937-c75ea1476830/go.mod h1:qTslktI+Qh9hXo7ZP8xLkl5V8AxUMfxG0xLtkCFLxnw= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= From 8c2717f96a54d1bf0d543a78afc766913a3cf9ac Mon Sep 17 00:00:00 2001 From: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> Date: Fri, 28 Feb 2025 13:51:07 -0500 Subject: [PATCH 13/87] ipn/ipnlocal: send vipServices info via c2n even it's incomplete (#15166) This commit updates the logic of vipServicesFromPrefsLocked, so that it would return the vipServices list even when service host is only advertising the service but not yet serving anything. This makes control always get accurate state of service host in terms of serving a service. 
Fixes tailscale/corp#26843 Signed-off-by: KevinLiang10 <37811973+KevinLiang10@users.noreply.github.com> --- ipn/ipnlocal/local.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index fec5c166f..4f94a55a1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -8222,15 +8222,13 @@ func (b *LocalBackend) vipServiceHash(services []*tailcfg.VIPService) string { func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcfg.VIPService { // keyed by service name var services map[tailcfg.ServiceName]*tailcfg.VIPService - if !b.serveConfig.Valid() { - return nil - } - - for svc, config := range b.serveConfig.Services().All() { - mak.Set(&services, svc, &tailcfg.VIPService{ - Name: svc, - Ports: config.ServicePortRange(), - }) + if b.serveConfig.Valid() { + for svc, config := range b.serveConfig.Services().All() { + mak.Set(&services, svc, &tailcfg.VIPService{ + Name: svc, + Ports: config.ServicePortRange(), + }) + } } for _, s := range prefs.AdvertiseServices().All() { From ef906763ee5a7e5e22eaf5336dd020532d9b6964 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 27 Feb 2025 16:31:56 -0800 Subject: [PATCH 14/87] util/eventbus: initial implementation of an in-process event bus Updates #15160 Signed-off-by: David Anderson Co-authored-by: M. J. 
Fromberger --- go.mod | 1 + go.sum | 4 + util/eventbus/bus.go | 223 +++++++++++++++++++++++++++++++++++++ util/eventbus/bus_test.go | 196 ++++++++++++++++++++++++++++++++ util/eventbus/doc.go | 100 +++++++++++++++++ util/eventbus/publish.go | 79 +++++++++++++ util/eventbus/queue.go | 83 ++++++++++++++ util/eventbus/subscribe.go | 170 ++++++++++++++++++++++++++++ 8 files changed, 856 insertions(+) create mode 100644 util/eventbus/bus.go create mode 100644 util/eventbus/bus_test.go create mode 100644 util/eventbus/doc.go create mode 100644 util/eventbus/publish.go create mode 100644 util/eventbus/queue.go create mode 100644 util/eventbus/subscribe.go diff --git a/go.mod b/go.mod index 106538e94..970e2e63c 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/coder/websocket v1.8.12 github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf + github.com/creachadair/taskgroup v0.13.2 github.com/creack/pty v1.1.23 github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e diff --git a/go.sum b/go.sum index efbf8ae2b..1707effd5 100644 --- a/go.sum +++ b/go.sum @@ -231,6 +231,8 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/mds v0.17.1 h1:lXQbTGKmb3nE3aK6OEp29L1gCx6B5ynzlQ6c1KOBurc= github.com/creachadair/mds v0.17.1/go.mod h1:4b//mUiL8YldH6TImXjmW45myzTLNS1LLjOmrk888eg= +github.com/creachadair/taskgroup v0.13.2 h1:3KyqakBuFsm3KkXi/9XIb0QcA8tEzLHLgaoidf0MdVc= +github.com/creachadair/taskgroup v0.13.2/go.mod h1:i3V1Zx7H8RjwljUEeUWYT30Lmb9poewSb2XI1yTwD0g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod 
h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= @@ -298,6 +300,8 @@ github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phm github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go new file mode 100644 index 000000000..85d73b15e --- /dev/null +++ b/util/eventbus/bus.go @@ -0,0 +1,223 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "context" + "reflect" + "slices" + "sync" + + "tailscale.com/util/set" +) + +// Bus is an event bus that distributes published events to interested +// subscribers. +type Bus struct { + write chan any + stop goroutineShutdownControl + snapshot chan chan []any + + topicsMu sync.Mutex // guards everything below. + topics map[reflect.Type][]*Queue + + // Used for introspection/debugging only, not in the normal event + // publishing path. + publishers set.Set[publisher] + queues set.Set[*Queue] +} + +// New returns a new bus. Use [PublisherOf] to make event publishers, +// and [Bus.Queue] and [Subscribe] to make event subscribers. 
+func New() *Bus { + stopCtl, stopWorker := newGoroutineShutdown() + ret := &Bus{ + write: make(chan any), + stop: stopCtl, + snapshot: make(chan chan []any), + topics: map[reflect.Type][]*Queue{}, + publishers: set.Set[publisher]{}, + queues: set.Set[*Queue]{}, + } + go ret.pump(stopWorker) + return ret +} + +func (b *Bus) pump(stop goroutineShutdownWorker) { + defer stop.Done() + var vals queue + acceptCh := func() chan any { + if vals.Full() { + return nil + } + return b.write + } + for { + // Drain all pending events. Note that while we're draining + // events into subscriber queues, we continue to + // opportunistically accept more incoming events, if we have + // queue space for it. + for !vals.Empty() { + val := vals.Peek() + dests := b.dest(reflect.ValueOf(val).Type()) + for _, d := range dests { + deliverOne: + for { + select { + case d.write <- val: + break deliverOne + case <-d.stop.WaitChan(): + // Queue closed, don't block but continue + // delivering to others. + break deliverOne + case in := <-acceptCh(): + vals.Add(in) + case <-stop.Stop(): + return + case ch := <-b.snapshot: + ch <- vals.Snapshot() + } + } + } + vals.Drop() + } + + // Inbound queue empty, wait for at least 1 work item before + // resuming. + for vals.Empty() { + select { + case <-stop.Stop(): + return + case val := <-b.write: + vals.Add(val) + case ch := <-b.snapshot: + ch <- nil + } + } + } +} + +func (b *Bus) dest(t reflect.Type) []*Queue { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + return b.topics[t] +} + +func (b *Bus) subscribe(t reflect.Type, q *Queue) (cancel func()) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.topics[t] = append(b.topics[t], q) + return func() { + b.unsubscribe(t, q) + } +} + +func (b *Bus) unsubscribe(t reflect.Type, q *Queue) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + // Topic slices are accessed by pump without holding a lock, so we + // have to replace the entire slice when unsubscribing. 
+	// Unsubscribing should be infrequent enough that this won't + // matter. + i := slices.Index(b.topics[t], q) + if i < 0 { + return + } + b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1) +} + +func (b *Bus) Close() { + b.stop.StopAndWait() +} + +// Queue returns a new queue with no subscriptions. Use [Subscribe] to +// attach subscriptions to it. +// +// The queue's name should be a short, human-readable string that +// identifies this queue. The name is only visible through debugging +// APIs. +func (b *Bus) Queue(name string) *Queue { + return newQueue(b, name) +} + +func (b *Bus) addQueue(q *Queue) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.queues.Add(q) +} + +func (b *Bus) deleteQueue(q *Queue) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.queues.Delete(q) +} + +func (b *Bus) addPublisher(p publisher) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.publishers.Add(p) +} + +func (b *Bus) deletePublisher(p publisher) { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.publishers.Delete(p) +} + +func newGoroutineShutdown() (goroutineShutdownControl, goroutineShutdownWorker) { + ctx, cancel := context.WithCancel(context.Background()) + + ctl := goroutineShutdownControl{ + startShutdown: cancel, + shutdownFinished: make(chan struct{}), + } + work := goroutineShutdownWorker{ + startShutdown: ctx.Done(), + shutdownFinished: ctl.shutdownFinished, + } + + return ctl, work +} + +// goroutineShutdownControl is a helper type to manage the shutdown of +// a worker goroutine. The worker goroutine should use the +// goroutineShutdownWorker related to this controller. 
+type goroutineShutdownControl struct { + startShutdown context.CancelFunc + shutdownFinished chan struct{} +} + +func (ctl *goroutineShutdownControl) Stop() { + ctl.startShutdown() +} + +func (ctl *goroutineShutdownControl) Wait() { + <-ctl.shutdownFinished +} + +func (ctl *goroutineShutdownControl) WaitChan() <-chan struct{} { + return ctl.shutdownFinished +} + +func (ctl *goroutineShutdownControl) StopAndWait() { + ctl.Stop() + ctl.Wait() +} + +// goroutineShutdownWorker is a helper type for a worker goroutine to +// be notified that it should shut down, and to report that shutdown +// has completed. The notification is triggered by the related +// goroutineShutdownControl. +type goroutineShutdownWorker struct { + startShutdown <-chan struct{} + shutdownFinished chan struct{} +} + +func (work *goroutineShutdownWorker) Stop() <-chan struct{} { + return work.startShutdown +} + +func (work *goroutineShutdownWorker) Done() { + close(work.shutdownFinished) +} diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go new file mode 100644 index 000000000..180f4164a --- /dev/null +++ b/util/eventbus/bus_test.go @@ -0,0 +1,196 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus_test + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/creachadair/taskgroup" + "github.com/google/go-cmp/cmp" + "tailscale.com/util/eventbus" +) + +type EventA struct { + Counter int +} + +type EventB struct { + Counter int +} + +func TestBus(t *testing.T) { + b := eventbus.New() + defer b.Close() + + q := b.Queue("TestBus") + defer q.Close() + s := eventbus.Subscribe[EventA](q) + + go func() { + pa := eventbus.PublisherOf[EventA](b, "TestBusA") + defer pa.Close() + pb := eventbus.PublisherOf[EventB](b, "TestBusB") + defer pb.Close() + pa.Publish(EventA{1}) + pb.Publish(EventB{2}) + pa.Publish(EventA{3}) + }() + + want := expectEvents(t, EventA{1}, EventA{3}) + for !want.Empty() { + select { + case got := 
<-s.Events(): + want.Got(got) + case <-q.Done(): + t.Fatalf("queue closed unexpectedly") + case <-time.After(time.Second): + t.Fatalf("timed out waiting for event") + } + } +} + +func TestBusMultipleConsumers(t *testing.T) { + b := eventbus.New() + defer b.Close() + + q1 := b.Queue("TestBusA") + defer q1.Close() + s1 := eventbus.Subscribe[EventA](q1) + + q2 := b.Queue("TestBusAB") + defer q2.Close() + s2A := eventbus.Subscribe[EventA](q2) + s2B := eventbus.Subscribe[EventB](q2) + + go func() { + pa := eventbus.PublisherOf[EventA](b, "TestBusA") + defer pa.Close() + pb := eventbus.PublisherOf[EventB](b, "TestBusB") + defer pb.Close() + pa.Publish(EventA{1}) + pb.Publish(EventB{2}) + pa.Publish(EventA{3}) + }() + + wantA := expectEvents(t, EventA{1}, EventA{3}) + wantB := expectEvents(t, EventA{1}, EventB{2}, EventA{3}) + for !wantA.Empty() || !wantB.Empty() { + select { + case got := <-s1.Events(): + wantA.Got(got) + case got := <-s2A.Events(): + wantB.Got(got) + case got := <-s2B.Events(): + wantB.Got(got) + case <-q1.Done(): + t.Fatalf("queue closed unexpectedly") + case <-q2.Done(): + t.Fatalf("queue closed unexpectedly") + case <-time.After(time.Second): + t.Fatalf("timed out waiting for event") + } + } +} + +func TestSpam(t *testing.T) { + b := eventbus.New() + defer b.Close() + + const ( + publishers = 100 + eventsPerPublisher = 20 + wantEvents = publishers * eventsPerPublisher + subscribers = 100 + ) + + var g taskgroup.Group + + received := make([][]EventA, subscribers) + for i := range subscribers { + q := b.Queue(fmt.Sprintf("Subscriber%d", i)) + defer q.Close() + s := eventbus.Subscribe[EventA](q) + g.Go(func() error { + for range wantEvents { + select { + case evt := <-s.Events(): + received[i] = append(received[i], evt) + case <-q.Done(): + t.Errorf("queue done before expected number of events received") + return errors.New("queue prematurely closed") + case <-time.After(5 * time.Second): + t.Errorf("timed out waiting for expected bus event after %d 
events", len(received[i])) + return errors.New("timeout") + } + } + return nil + }) + } + + published := make([][]EventA, publishers) + for i := range publishers { + g.Run(func() { + p := eventbus.PublisherOf[EventA](b, fmt.Sprintf("Publisher%d", i)) + for j := range eventsPerPublisher { + evt := EventA{i*eventsPerPublisher + j} + p.Publish(evt) + published[i] = append(published[i], evt) + } + }) + } + + if err := g.Wait(); err != nil { + t.Fatal(err) + } + var last []EventA + for i, got := range received { + if len(got) != wantEvents { + // Receiving goroutine already reported an error, we just need + // to fail early within the main test goroutine. + t.FailNow() + } + if last == nil { + continue + } + if diff := cmp.Diff(got, last); diff != "" { + t.Errorf("Subscriber %d did not see the same events as %d (-got+want):\n%s", i, i-1, diff) + } + last = got + } + for i, sent := range published { + if got := len(sent); got != eventsPerPublisher { + t.Fatalf("Publisher %d sent %d events, want %d", i, got, eventsPerPublisher) + } + } + + // TODO: check that the published sequences are proper + // subsequences of the received slices. +} + +type queueChecker struct { + t *testing.T + want []any +} + +func expectEvents(t *testing.T, want ...any) *queueChecker { + return &queueChecker{t, want} +} + +func (q *queueChecker) Got(v any) { + q.t.Helper() + if q.Empty() { + q.t.Fatalf("queue got unexpected %v", v) + } + if v != q.want[0] { + q.t.Fatalf("queue got %#v, want %#v", v, q.want[0]) + } + q.want = q.want[1:] +} + +func (q *queueChecker) Empty() bool { + return len(q.want) == 0 +} diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go new file mode 100644 index 000000000..136823c42 --- /dev/null +++ b/util/eventbus/doc.go @@ -0,0 +1,100 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package eventbus provides an in-process event bus. 
+// +// The event bus connects publishers of typed events with subscribers +// interested in those events. +// +// # Usage +// +// To publish events, use [PublisherOf] to get a typed publisher for +// your event type, then call [Publisher.Publish] as needed. If your +// event is expensive to construct, you can optionally use +// [Publisher.ShouldPublish] to skip the work if nobody is listening +// for the event. +// +// To receive events, first use [Bus.Queue] to create an event +// delivery queue, then use [Subscribe] to get a [Subscriber] for each +// event type you're interested in. Receive the events themselves by +// selecting over all your [Subscriber.Chan] channels, as well as +// [Queue.Done] for shutdown notifications. +// +// # Concurrency properties +// +// The bus serializes all published events, and preserves that +// ordering when delivering to subscribers that are attached to the +// same Queue. In more detail: +// +// - An event is published to the bus at some instant between the +// start and end of the call to [Publisher.Publish]. +// - Events cannot be published at the same instant, and so are +// totally ordered by their publication time. Given two events E1 +// and E2, either E1 happens before E2, or E2 happens before E1. +// - Queues dispatch events to their Subscribers in publication +// order: if E1 happens before E2, the queue always delivers E1 +// before E2. +// - Queues do not synchronize with each other: given queues Q1 and +// Q2, both subscribed to events E1 and E2, Q1 may deliver both E1 +// and E2 before Q2 delivers E1. +// +// Less formally: there is one true timeline of all published events. +// If you make a Queue and subscribe to events on it, you will receive +// those events one at a time, in the same order as the one true +// timeline. 
You will "skip over" events you didn't subscribe to, but +// your view of the world always moves forward in time, never +// backwards, and you will observe events in the same order as +// everyone else. +// +// However, you cannot assume that what your subscribers on your queue +// see as "now" is the same as what other subscribers on other +// queues. Their queue may be further behind you in the timeline, or +// running ahead of you. This means you should be careful about +// reaching out to another component directly after receiving an +// event, as its view of the world may not yet (or ever) be exactly +// consistent with yours. +// +// To make your code more testable and understandable, you should try +// to structure it following the actor model: you have some local +// state over which you have authority, but your only way to interact +// with state elsewhere in the program is to receive and process +// events coming from elsewhere, or to emit events of your own. +// +// # Expected subscriber behavior +// +// Subscribers are expected to promptly receive their events on +// [Subscriber.Chan]. The bus has a small, fixed amount of internal +// buffering, meaning that a slow subscriber will eventually cause +// backpressure and block publication of all further events. +// +// In general, you should receive from your subscriber(s) in a loop, +// and only do fast state updates within that loop. Any heavier work +// should be offloaded to another goroutine. +// +// Causing publishers to block from backpressure is considered a bug +// in the slow subscriber causing the backpressure, and should be +// addressed there. Publishers should assume that Publish will not +// block for extended periods of time, and should not make exceptional +// effort to behave gracefully if they do get blocked. +// +// These blocking semantics are provisional and subject to +// change. 
Please speak up if this causes development pain, so that we +// can adapt the semantics to better suit our needs. +// +// # Debugging facilities +// +// (TODO, not implemented yet, sorry, I promise we're working on it next!) +// +// The bus comes with introspection facilities to help reason about +// the state of the client, and diagnose issues such as slow +// subscribers. +// +// The bus provide a tsweb debugging page that shows the current state +// of the bus, including all publishers, subscribers, and queued +// events. +// +// The bus also has a snooping and tracing facility, which lets you +// observe all events flowing through the bus, along with their +// source, destination(s) and timing information such as the time of +// delivery to each subscriber and end-to-end bus delays. +package eventbus diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go new file mode 100644 index 000000000..14828812b --- /dev/null +++ b/util/eventbus/publish.go @@ -0,0 +1,79 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "context" + "reflect" +) + +// publisher is a uniformly typed wrapper around Publisher[T], so that +// debugging facilities can look at active publishers. +type publisher interface { + publisherName() string +} + +// A Publisher publishes events on the bus. +type Publisher[T any] struct { + bus *Bus + name string + stopCtx context.Context + stop context.CancelFunc +} + +// PublisherOf returns a publisher for event type T on the given bus. +// +// The publisher's name should be a short, human-readable string that +// identifies this event publisher. The name is only visible through +// debugging APIs. 
+func PublisherOf[T any](b *Bus, name string) *Publisher[T] { + ctx, cancel := context.WithCancel(context.Background()) + ret := &Publisher[T]{ + bus: b, + name: name, + stopCtx: ctx, + stop: cancel, + } + b.addPublisher(ret) + return ret +} + +func (p *Publisher[T]) publisherName() string { return p.name } + +// Publish publishes event v on the bus. +func (p *Publisher[T]) Publish(v T) { + // Check for just a stopped publisher or bus before trying to + // write, so that once closed Publish consistently does nothing. + select { + case <-p.stopCtx.Done(): + return + case <-p.bus.stop.WaitChan(): + return + default: + } + + select { + case p.bus.write <- v: + case <-p.stopCtx.Done(): + case <-p.bus.stop.WaitChan(): + } +} + +// ShouldPublish reports whether anyone is subscribed to events of +// type T. +// +// ShouldPublish can be used to skip expensive event construction if +// nobody seems to care. Publishers must not assume that someone will +// definitely receive an event if ShouldPublish returns true. +func (p *Publisher[T]) ShouldPublish() bool { + dests := p.bus.dest(reflect.TypeFor[T]()) + return len(dests) > 0 +} + +// Close closes the publisher, indicating that no further events will +// be published with it. +func (p *Publisher[T]) Close() { + p.stop() + p.bus.deletePublisher(p) +} diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go new file mode 100644 index 000000000..8f6bda748 --- /dev/null +++ b/util/eventbus/queue.go @@ -0,0 +1,83 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "slices" +) + +const maxQueuedItems = 16 + +// queue is an ordered queue of length up to maxQueuedItems. +type queue struct { + vals []any + start int +} + +// canAppend reports whether a value can be appended to q.vals without +// shifting values around. 
+func (q *queue) canAppend() bool { + return cap(q.vals) < maxQueuedItems || len(q.vals) < cap(q.vals) +} + +func (q *queue) Full() bool { + return q.start == 0 && !q.canAppend() +} + +func (q *queue) Empty() bool { + return q.start == len(q.vals) +} + +func (q *queue) Len() int { + return len(q.vals) - q.start +} + +// Add adds v to the end of the queue. Blocks until append can be +// done. +func (q *queue) Add(v any) { + if !q.canAppend() { + if q.start == 0 { + panic("Add on a full queue") + } + + // Slide remaining values back to the start of the array. + n := copy(q.vals, q.vals[q.start:]) + toClear := len(q.vals) - n + clear(q.vals[len(q.vals)-toClear:]) + q.vals = q.vals[:n] + q.start = 0 + } + + q.vals = append(q.vals, v) +} + +// Peek returns the first value in the queue, without removing it from +// the queue, or nil if the queue is empty. +func (q *queue) Peek() any { + if q.Empty() { + return nil + } + + return q.vals[q.start] +} + +// Drop discards the first value in the queue, if any. +func (q *queue) Drop() { + if q.Empty() { + return + } + + q.vals[q.start] = nil + q.start++ + if q.Empty() { + // Reset cursor to start of array, it's free to do. + q.start = 0 + q.vals = q.vals[:0] + } +} + +// Snapshot returns a copy of the queue's contents. +func (q *queue) Snapshot() []any { + return slices.Clone(q.vals[q.start:]) +} diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go new file mode 100644 index 000000000..ade834d77 --- /dev/null +++ b/util/eventbus/subscribe.go @@ -0,0 +1,170 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "fmt" + "reflect" + "sync" +) + +type dispatchFn func(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool + +// A Queue receives events from a Bus. +// +// To receive events through the queue, see [Subscribe]. Subscribers +// that share the same Queue receive events one at time, in the order +// they were published. 
+type Queue struct { + bus *Bus + name string + + write chan any + stop goroutineShutdownControl + snapshot chan chan []any + + outputsMu sync.Mutex + outputs map[reflect.Type]dispatchFn +} + +func newQueue(b *Bus, name string) *Queue { + stopCtl, stopWorker := newGoroutineShutdown() + ret := &Queue{ + bus: b, + name: name, + write: make(chan any), + stop: stopCtl, + snapshot: make(chan chan []any), + outputs: map[reflect.Type]dispatchFn{}, + } + b.addQueue(ret) + go ret.pump(stopWorker) + return ret +} + +func (q *Queue) pump(stop goroutineShutdownWorker) { + defer stop.Done() + var vals queue + acceptCh := func() chan any { + if vals.Full() { + return nil + } + return q.write + } + for { + if !vals.Empty() { + val := vals.Peek() + fn := q.dispatchFn(val) + if fn == nil { + // Raced with unsubscribe. + vals.Drop() + continue + } + if !fn(&vals, stop, acceptCh) { + return + } + } else { + // Keep the cases in this select in sync with + // Subscriber.dispatch below. The only different should be + // that this select doesn't deliver queued values to + // anyone, and unconditionally accepts new values. + select { + case val := <-q.write: + vals.Add(val) + case <-stop.Stop(): + return + case ch := <-q.snapshot: + ch <- vals.Snapshot() + } + } + } +} + +// A Subscriber delivers one type of event from a [Queue]. +type Subscriber[T any] struct { + recv *Queue + read chan T +} + +func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool { + t := vals.Peek().(T) + for { + // Keep the cases in this select in sync with Queue.pump + // above. The only different should be that this select + // delivers a value on s.read. + select { + case s.read <- t: + vals.Drop() + return true + case val := <-acceptCh(): + vals.Add(val) + case <-stop.Stop(): + return false + case ch := <-s.recv.snapshot: + ch <- vals.Snapshot() + } + } +} + +// Events returns a channel on which the subscriber's events are +// delivered. 
+func (s *Subscriber[T]) Events() <-chan T { + return s.read +} + +// Close shuts down the Subscriber, indicating the caller no longer +// wishes to receive these events. After Close, receives on +// [Subscriber.Chan] block for ever. +func (s *Subscriber[T]) Close() { + t := reflect.TypeFor[T]() + s.recv.bus.unsubscribe(t, s.recv) + s.recv.deleteDispatchFn(t) +} + +func (q *Queue) dispatchFn(val any) dispatchFn { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + return q.outputs[reflect.ValueOf(val).Type()] +} + +func (q *Queue) addDispatchFn(t reflect.Type, fn dispatchFn) { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + if q.outputs[t] != nil { + panic(fmt.Errorf("double subscription for event %s", t)) + } + q.outputs[t] = fn +} + +func (q *Queue) deleteDispatchFn(t reflect.Type) { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + delete(q.outputs, t) +} + +// Done returns a channel that is closed when the Queue is closed. +func (q *Queue) Done() <-chan struct{} { + return q.stop.WaitChan() +} + +// Close closes the queue. All Subscribers attached to the queue are +// implicitly closed, and any pending events are discarded. +func (q *Queue) Close() { + q.stop.StopAndWait() + q.bus.deleteQueue(q) +} + +// Subscribe requests delivery of events of type T through the given +// Queue. Panics if the queue already has a subscriber for T. 
+func Subscribe[T any](r *Queue) Subscriber[T] { + t := reflect.TypeFor[T]() + ret := Subscriber[T]{ + recv: r, + read: make(chan T), + } + r.addDispatchFn(t, ret.dispatch) + r.bus.subscribe(t, r) + + return ret +} From 74d7d8a77b14abb8ce31c9c81f5b2dbee03eec96 Mon Sep 17 00:00:00 2001 From: Lee Briggs Date: Fri, 24 Jan 2025 11:15:28 -0800 Subject: [PATCH 15/87] ipn/store/awsstore: allow providing a KMS key Implements a KMS input for AWS parameter to support encrypting Tailscale state Fixes #14765 Change-Id: I39c0fae4bfd60a9aec17c5ea6a61d0b57143d4ba Co-authored-by: Brad Fitzpatrick Signed-off-by: Lee Briggs --- ipn/store/awsstore/store_aws.go | 111 ++++++++++++++++++++++----- ipn/store/awsstore/store_aws_stub.go | 18 ----- ipn/store/awsstore/store_aws_test.go | 61 ++++++++++++++- ipn/store/store_aws.go | 10 ++- 4 files changed, 157 insertions(+), 43 deletions(-) delete mode 100644 ipn/store/awsstore/store_aws_stub.go diff --git a/ipn/store/awsstore/store_aws.go b/ipn/store/awsstore/store_aws.go index 0fb78d45a..40bbbf037 100644 --- a/ipn/store/awsstore/store_aws.go +++ b/ipn/store/awsstore/store_aws.go @@ -10,7 +10,9 @@ import ( "context" "errors" "fmt" + "net/url" "regexp" + "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/arn" @@ -28,6 +30,14 @@ const ( var parameterNameRx = regexp.MustCompile(parameterNameRxStr) +// Option defines a functional option type for configuring awsStore. +type Option func(*storeOptions) + +// storeOptions holds optional settings for creating a new awsStore. +type storeOptions struct { + kmsKey string +} + // awsSSMClient is an interface allowing us to mock the couple of // API calls we are leveraging with the AWSStore provider type awsSSMClient interface { @@ -46,6 +56,10 @@ type awsStore struct { ssmClient awsSSMClient ssmARN arn.ARN + // kmsKey is optional. If empty, the parameter is stored in plaintext. + // If non-empty, the parameter is encrypted with this KMS key. 
+ kmsKey string + memory mem.Store } @@ -57,30 +71,80 @@ type awsStore struct { // Tailscaled to only only store new state in-memory and // restarting Tailscaled can fail until you delete your state // from the AWS Parameter Store. -func New(_ logger.Logf, ssmARN string) (ipn.StateStore, error) { - return newStore(ssmARN, nil) +// +// If you want to specify an optional KMS key, +// pass one or more Option objects, e.g. awsstore.WithKeyID("alias/my-key"). +func New(_ logger.Logf, ssmARN string, opts ...Option) (ipn.StateStore, error) { + // Apply all options to an empty storeOptions + var so storeOptions + for _, opt := range opts { + opt(&so) + } + + return newStore(ssmARN, so, nil) +} + +// WithKeyID sets the KMS key to be used for encryption. It can be +// a KeyID, an alias ("alias/my-key"), or a full ARN. +// +// If kmsKey is empty, the Option is a no-op. +func WithKeyID(kmsKey string) Option { + return func(o *storeOptions) { + o.kmsKey = kmsKey + } +} + +// ParseARNAndOpts parses an ARN and optional URL-encoded parameters +// from arg. +func ParseARNAndOpts(arg string) (ssmARN string, opts []Option, err error) { + ssmARN = arg + + // Support optional ?url-encoded-parameters. + if s, q, ok := strings.Cut(arg, "?"); ok { + ssmARN = s + q, err := url.ParseQuery(q) + if err != nil { + return "", nil, err + } + + for k := range q { + switch k { + default: + return "", nil, fmt.Errorf("unknown arn option parameter %q", k) + case "kmsKey": + // We allow an ARN, a key ID, or an alias name for kmsKeyID. + // If it doesn't look like an ARN and doesn't have a '/', + // prepend "alias/" for KMS alias references. + kmsKey := q.Get(k) + if kmsKey != "" && + !strings.Contains(kmsKey, "/") && + !strings.HasPrefix(kmsKey, "arn:") { + kmsKey = "alias/" + kmsKey + } + if kmsKey != "" { + opts = append(opts, WithKeyID(kmsKey)) + } + } + } + } + return ssmARN, opts, nil } // newStore is NewStore, but for tests. If client is non-nil, it's // used instead of making one. 
-func newStore(ssmARN string, client awsSSMClient) (ipn.StateStore, error) { +func newStore(ssmARN string, so storeOptions, client awsSSMClient) (ipn.StateStore, error) { s := &awsStore{ ssmClient: client, + kmsKey: so.kmsKey, } var err error - - // Parse the ARN if s.ssmARN, err = arn.Parse(ssmARN); err != nil { return nil, fmt.Errorf("unable to parse the ARN correctly: %v", err) } - - // Validate the ARN corresponds to the SSM service if s.ssmARN.Service != "ssm" { return nil, fmt.Errorf("invalid service %q, expected 'ssm'", s.ssmARN.Service) } - - // Validate the ARN corresponds to a parameter store resource if !parameterNameRx.MatchString(s.ssmARN.Resource) { return nil, fmt.Errorf("invalid resource %q, expected to match %v", s.ssmARN.Resource, parameterNameRxStr) } @@ -96,12 +160,11 @@ func newStore(ssmARN string, client awsSSMClient) (ipn.StateStore, error) { s.ssmClient = ssm.NewFromConfig(cfg) } - // Hydrate cache with the potentially current state + // Preload existing state, if any if err := s.LoadState(); err != nil { return nil, err } return s, nil - } // LoadState attempts to read the state from AWS SSM parameter store key. @@ -172,15 +235,21 @@ func (s *awsStore) persistState() error { // which is free. 
However, if it exceeds 4kb it switches the parameter to advanced tiering // doubling the capacity to 8kb per the following docs: // https://aws.amazon.com/about-aws/whats-new/2019/08/aws-systems-manager-parameter-store-announces-intelligent-tiering-to-enable-automatic-parameter-tier-selection/ - _, err = s.ssmClient.PutParameter( - context.TODO(), - &ssm.PutParameterInput{ - Name: aws.String(s.ParameterName()), - Value: aws.String(string(bs)), - Overwrite: aws.Bool(true), - Tier: ssmTypes.ParameterTierIntelligentTiering, - Type: ssmTypes.ParameterTypeSecureString, - }, - ) + in := &ssm.PutParameterInput{ + Name: aws.String(s.ParameterName()), + Value: aws.String(string(bs)), + Overwrite: aws.Bool(true), + Tier: ssmTypes.ParameterTierIntelligentTiering, + Type: ssmTypes.ParameterTypeSecureString, + } + + // If kmsKey is specified, encrypt with that key + // NOTE: this input allows any alias, keyID or ARN + // If this isn't specified, AWS will use the default KMS key + if s.kmsKey != "" { + in.KeyId = aws.String(s.kmsKey) + } + + _, err = s.ssmClient.PutParameter(context.TODO(), in) return err } diff --git a/ipn/store/awsstore/store_aws_stub.go b/ipn/store/awsstore/store_aws_stub.go deleted file mode 100644 index 8d2156ce9..000000000 --- a/ipn/store/awsstore/store_aws_stub.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (c) Tailscale Inc & AUTHORS -// SPDX-License-Identifier: BSD-3-Clause - -//go:build !linux || ts_omit_aws - -package awsstore - -import ( - "fmt" - "runtime" - - "tailscale.com/ipn" - "tailscale.com/types/logger" -) - -func New(logger.Logf, string) (ipn.StateStore, error) { - return nil, fmt.Errorf("AWS store is not supported on %v", runtime.GOOS) -} diff --git a/ipn/store/awsstore/store_aws_test.go b/ipn/store/awsstore/store_aws_test.go index f6c8fedb3..3382635a7 100644 --- a/ipn/store/awsstore/store_aws_test.go +++ b/ipn/store/awsstore/store_aws_test.go @@ -1,7 +1,7 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause 
-//go:build linux +//go:build linux && !ts_omit_aws package awsstore @@ -65,7 +65,11 @@ func TestNewAWSStore(t *testing.T) { Resource: "parameter/foo", } - s, err := newStore(storeParameterARN.String(), mc) + opts := storeOptions{ + kmsKey: "arn:aws:kms:eu-west-1:123456789:key/MyCustomKey", + } + + s, err := newStore(storeParameterARN.String(), opts, mc) if err != nil { t.Fatalf("creating aws store failed: %v", err) } @@ -73,7 +77,7 @@ func TestNewAWSStore(t *testing.T) { // Build a brand new file store and check that both IDs written // above are still there. - s2, err := newStore(storeParameterARN.String(), mc) + s2, err := newStore(storeParameterARN.String(), opts, mc) if err != nil { t.Fatalf("creating second aws store failed: %v", err) } @@ -162,3 +166,54 @@ func testStoreSemantics(t *testing.T, store ipn.StateStore) { } } } + +func TestParseARNAndOpts(t *testing.T) { + tests := []struct { + name string + arg string + wantARN string + wantKey string + }{ + { + name: "no-key", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + }, + { + name: "custom-key", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam?kmsKey=alias/MyCustomKey", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantKey: "alias/MyCustomKey", + }, + { + name: "bare-name", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam?kmsKey=Bare", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantKey: "alias/Bare", + }, + { + name: "arn-arg", + arg: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam?kmsKey=arn:foo", + wantARN: "arn:aws:ssm:us-east-1:123456789012:parameter/myTailscaleParam", + wantKey: "arn:foo", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + arn, opts, err := ParseARNAndOpts(tt.arg) + if err != nil { + t.Fatalf("New: %v", err) + } + if arn != tt.wantARN 
{ + t.Errorf("ARN = %q; want %q", arn, tt.wantARN) + } + var got storeOptions + for _, opt := range opts { + opt(&got) + } + if got.kmsKey != tt.wantKey { + t.Errorf("kmsKey = %q; want %q", got.kmsKey, tt.wantKey) + } + }) + } +} diff --git a/ipn/store/store_aws.go b/ipn/store/store_aws.go index e164f9de7..d39e84319 100644 --- a/ipn/store/store_aws.go +++ b/ipn/store/store_aws.go @@ -6,7 +6,9 @@ package store import ( + "tailscale.com/ipn" "tailscale.com/ipn/store/awsstore" + "tailscale.com/types/logger" ) func init() { @@ -14,5 +16,11 @@ func init() { } func registerAWSStore() { - Register("arn:", awsstore.New) + Register("arn:", func(logf logger.Logf, arg string) (ipn.StateStore, error) { + ssmARN, opts, err := awsstore.ParseARNAndOpts(arg) + if err != nil { + return nil, err + } + return awsstore.New(logf, ssmARN, opts...) + }) } From dc18091678ebf3928bf3ead518f2d6e979547526 Mon Sep 17 00:00:00 2001 From: kari-ts <135075563+kari-ts@users.noreply.github.com> Date: Fri, 28 Feb 2025 14:17:28 -0800 Subject: [PATCH 16/87] ipn: update AddPeer to include TaildropTarget (#15091) We previously were not merging in the TaildropTarget into the PeerStatus because we did not update AddPeer. 
Updates tailscale/tailscale#14393 Signed-off-by: kari-ts --- ipn/ipnlocal/local.go | 2 +- ipn/ipnstate/ipnstate.go | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 4f94a55a1..74796a62a 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -6709,7 +6709,7 @@ func (b *LocalBackend) FileTargets() ([]*apitype.FileTarget, error) { } func (b *LocalBackend) taildropTargetStatus(p tailcfg.NodeView) ipnstate.TaildropTargetStatus { - if b.netMap == nil || b.state != ipn.Running { + if b.state != ipn.Running { return ipnstate.TaildropTargetIpnStateNotRunning } if b.netMap == nil { diff --git a/ipn/ipnstate/ipnstate.go b/ipn/ipnstate/ipnstate.go index bc1ba615d..89c6d7e24 100644 --- a/ipn/ipnstate/ipnstate.go +++ b/ipn/ipnstate/ipnstate.go @@ -216,6 +216,11 @@ type PeerStatusLite struct { } // PeerStatus describes a peer node and its current state. +// WARNING: The fields in PeerStatus are merged by the AddPeer method in the StatusBuilder. +// When adding a new field to PeerStatus, you must update AddPeer to handle merging +// the new field. The AddPeer function is responsible for combining multiple updates +// to the same peer, and any new field that is not merged properly may lead to +// inconsistencies or lost data in the peer status. 
type PeerStatus struct { ID tailcfg.StableNodeID PublicKey key.NodePublic @@ -533,6 +538,9 @@ func (sb *StatusBuilder) AddPeer(peer key.NodePublic, st *PeerStatus) { if v := st.Capabilities; v != nil { e.Capabilities = v } + if v := st.TaildropTarget; v != TaildropTargetUnknown { + e.TaildropTarget = v + } e.Location = st.Location } From 986daca5eeeffa04bdb184d1ee13f70d04d33ff1 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Sun, 2 Mar 2025 10:22:15 -0800 Subject: [PATCH 17/87] scripts/installer.sh: explicitly chmod 0644 installed files (#15171) Updates tailscale/tailscale#15133 Signed-off-by: Irbe Krumina --- scripts/installer.sh | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/scripts/installer.sh b/scripts/installer.sh index 388dd5a56..f3671aff8 100755 --- a/scripts/installer.sh +++ b/scripts/installer.sh @@ -7,14 +7,6 @@ set -eu -# Ensure that this script runs with the default umask for Linux. In practice, -# this means that files created by this script (such as keyring files) will be -# created with 644 permissions. This ensures that keyrings and other files -# created by this script are readable by installers on systems where the -# umask is set to a more restrictive value. -# See https://github.com/tailscale/tailscale/issues/15133 -umask 022 - # All the code is wrapped in a main function that gets called at the # bottom of the file, so that a truncated partial download doesn't end # up executing half a script. 
@@ -501,10 +493,13 @@ main() { legacy) $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.asc" | $SUDO apt-key add - $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.list" | $SUDO tee /etc/apt/sources.list.d/tailscale.list + $SUDO chmod 0644 /etc/apt/sources.list.d/tailscale.list ;; keyring) $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.noarmor.gpg" | $SUDO tee /usr/share/keyrings/tailscale-archive-keyring.gpg >/dev/null + $SUDO chmod 0644 /usr/share/keyrings/tailscale-archive-keyring.gpg $CURL "https://pkgs.tailscale.com/$TRACK/$OS/$VERSION.tailscale-keyring.list" | $SUDO tee /etc/apt/sources.list.d/tailscale.list + $SUDO chmod 0644 /etc/apt/sources.list.d/tailscale.list ;; esac $SUDO apt-get update From a567f56445d523a89922253ae4902ad19e71c1be Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Mon, 3 Mar 2025 08:04:18 -0800 Subject: [PATCH 18/87] ipn/store/kubestore: sanitize keys loaded to in-memory store (#15178) Reads use the sanitized form, so unsanitized keys being stored in memory resulted lookup failures, for example for serve config. Updates tailscale/tailscale#15134 Signed-off-by: Irbe Krumina --- ipn/store/kubestore/store_kube.go | 9 +++++++-- ipn/store/kubestore/store_kube_test.go | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index b4e14c6d3..ecd101c57 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -98,7 +98,11 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { defer func() { if err == nil { for id, bs := range data { - s.memory.WriteState(ipn.StateKey(id), bs) + // The in-memory store does not distinguish between values read from state Secret on + // init and values written to afterwards. Values read from the state + // Secret will always be sanitized, so we also need to sanitize values written to store + // later, so that the Read logic can just lookup keys in sanitized form. 
+ s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) } } if err != nil { @@ -198,8 +202,9 @@ func (s *Store) loadState() (err error) { return nil } -// sanitizeKey converts any value that can be converted to a string into a valid Kubernetes secret key. +// sanitizeKey converts any value that can be converted to a string into a valid Kubernetes Secret key. // Valid characters are alphanumeric, -, _, and . +// https://kubernetes.io/docs/concepts/configuration/secret/#restriction-names-data. func sanitizeKey[T ~string](k T) string { return strings.Map(func(r rune) rune { if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' || r >= '0' && r <= '9' || r == '-' || r == '_' || r == '.' { diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index f3c5ac9fb..351458efb 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -169,7 +169,7 @@ func TestUpdateStateSecret(t *testing.T) { // Verify memory store was updated for k, v := range tt.updates { - got, err := s.memory.ReadState(ipn.StateKey(k)) + got, err := s.memory.ReadState(ipn.StateKey(sanitizeKey(k))) if err != nil { t.Errorf("reading from memory store: %v", err) continue From ce6ce81311cc53df4498f2e8757b52be50801d64 Mon Sep 17 00:00:00 2001 From: Percy Wegmann Date: Fri, 28 Feb 2025 18:30:14 -0600 Subject: [PATCH 19/87] ipn/ipnlocal: initialize Taildrive shares when starting backend Previously, it initialized when the backend was created. This caused two problems: 1. It would not properly switch when changing profiles. 2. If the backend was created before the profile had been selected, Taildrive's shares were uninitialized. 
Updates #14825 Signed-off-by: Percy Wegmann --- ipn/ipnlocal/local.go | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 74796a62a..1ce299371 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -618,19 +618,6 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo } } - // initialize Taildrive shares from saved state - fs, ok := b.sys.DriveForRemote.GetOK() - if ok { - currentShares := b.pm.prefs.DriveShares() - if currentShares.Len() > 0 { - var shares []*drive.Share - for _, share := range currentShares.All() { - shares = append(shares, share.AsStruct()) - } - fs.SetShares(shares) - } - } - for name, newFn := range registeredExtensions { ext, err := newFn(logf, sys) if err != nil { @@ -2458,6 +2445,16 @@ func (b *LocalBackend) Start(opts ipn.Options) error { b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID) b.sendToLocked(ipn.Notify{Prefs: &prefs}, allClients) + // initialize Taildrive shares from saved state + if fs, ok := b.sys.DriveForRemote.GetOK(); ok { + currentShares := b.pm.CurrentPrefs().DriveShares() + var shares []*drive.Share + for _, share := range currentShares.All() { + shares = append(shares, share.AsStruct()) + } + fs.SetShares(shares) + } + if !loggedOut && (b.hasNodeKeyLocked() || confWantRunning) { // If we know that we're either logged in or meant to be // running, tell the controlclient that it should also assume From 5449aba94c51285ef5c2be322f47d1f7b636cd1b Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Mon, 3 Mar 2025 14:54:57 -0500 Subject: [PATCH 20/87] safesocket: correct logic for determining if we're a macOS GUI client (#15187) fixes tailscale/corp#26806 This was still slightly incorrect. We care only if the caller is the macSys or macOs app. isSandBoxedMacOS doesn't give us the correct answer for macSys because technically, macsys isn't sandboxed. 
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 16 ++++++++-------- safesocket/safesocket_darwin_test.go | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index f6e46bc50..5c2717ecf 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -37,16 +37,16 @@ type safesocketDarwin struct { sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file sharedDir string // shared directory for location of sameuserproof file - checkConn bool // Check macsys safesocket port before returning it - isMacSysExt func() bool // For testing only to force macsys - isSandboxedMacos func() bool // For testing only to force macOS sandbox + checkConn bool // Check macsys safesocket port before returning it + isMacSysExt func() bool // For testing only to force macsys + isMacGUIApp func() bool // For testing only to force macOS sandbox } var ssd = safesocketDarwin{ - isMacSysExt: version.IsMacSysExt, - isSandboxedMacos: version.IsSandboxedMacOS, - checkConn: true, - sharedDir: "/Library/Tailscale", + isMacSysExt: version.IsMacSysExt, + isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() }, + checkConn: true, + sharedDir: "/Library/Tailscale", } // There are three ways a Darwin binary can be run: as the Mac App Store (macOS) @@ -68,7 +68,7 @@ func localTCPPortAndTokenDarwin() (port int, token string, err error) { ssd.mu.Lock() defer ssd.mu.Unlock() - if !ssd.isSandboxedMacos() { + if !ssd.isMacGUIApp() { return 0, "", ErrNoTokenOnOS } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index 465ac0b68..2793d6aa3 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -17,7 +17,7 @@ import ( func TestSetCredentials(t *testing.T) { wantPort := 123 wantToken := "token" - tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { 
return true }) + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) SetCredentials(wantToken, wantPort) gotPort, gotToken, err := LocalTCPPortAndToken() @@ -38,7 +38,7 @@ func TestSetCredentials(t *testing.T) { // returns a listener and a non-zero port and non-empty token. func TestInitListenerDarwin(t *testing.T) { temp := t.TempDir() - tstest.Replace(t, &ssd.isSandboxedMacos, func() bool { return true }) + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) ln, err := InitListenerDarwin(temp) if err != nil || ln == nil { From 16a920b96ed9dc1f76b844e340b04f32c89242bf Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Mon, 3 Mar 2025 18:28:26 -0500 Subject: [PATCH 21/87] safesocket: add isMacSysExt Check (#15192) fixes tailscale/corp#26806 IsMacSysApp is not returning the correct answer... It looks like the rest of the code base uses isMacSysExt (when what they really want to know is isMacSysApp). To fix the immediate issue (localAPI is broken entirely in corp), we'll add this check to safesocket which lines up with the other usages, despite the confusing naming. 
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index 5c2717ecf..fb35ad9df 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -44,7 +44,7 @@ type safesocketDarwin struct { var ssd = safesocketDarwin{ isMacSysExt: version.IsMacSysExt, - isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() }, + isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() || version.IsMacSysExt() }, checkConn: true, sharedDir: "/Library/Tailscale", } From e74a705c6713b325e44a875c0d850b4a5c02223a Mon Sep 17 00:00:00 2001 From: Brian Palmer Date: Tue, 4 Mar 2025 08:47:35 -0700 Subject: [PATCH 22/87] cmd/hello: display native ipv4 (#15191) We are soon going to start assigning shared-in nodes a CGNAT IPv4 in the Hello tailnet when necessary, the same way that normal node shares assign a new IPv4 on conflict. But Hello wants to display the node's native IPv4, the one it uses in its own tailnet. That IPv4 isn't available anywhere in the netmap today, because it's not normally needed for anything. We are going to start sending that native IPv4 in the peer node CapMap, only for Hello's netmap responses. This change enables Hello to display that native IPv4 instead, when available. 
Updates tailscale/corp#25393 Change-Id: I87480b6d318ab028b41ef149eb3ba618bd7f1e08 Signed-off-by: Brian Palmer --- cmd/hello/hello.go | 5 +++++ tailcfg/tailcfg.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/cmd/hello/hello.go b/cmd/hello/hello.go index 86f885f54..fa116b28b 100644 --- a/cmd/hello/hello.go +++ b/cmd/hello/hello.go @@ -20,6 +20,7 @@ import ( "tailscale.com/client/local" "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" ) var ( @@ -134,6 +135,10 @@ func tailscaleIP(who *apitype.WhoIsResponse) string { if who == nil { return "" } + vals, err := tailcfg.UnmarshalNodeCapJSON[string](who.Node.CapMap, tailcfg.NodeAttrNativeIPV4) + if err == nil && len(vals) > 0 { + return vals[0] + } for _, nodeIP := range who.Node.Addresses { if nodeIP.Addr().Is4() && nodeIP.IsSingleIP() { return nodeIP.Addr().String() diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index f82c6eb81..b5f49c614 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2440,6 +2440,11 @@ const ( // type float64 representing the duration in seconds. This cap will be // omitted if the tailnet's MaxKeyDuration is the default. NodeAttrMaxKeyDuration NodeCapability = "tailnet.maxKeyDuration" + + // NodeAttrNativeIPV4 contains the IPV4 address of the node in its + // native tailnet. This is currently only sent to Hello, in its + // peer node list. + NodeAttrNativeIPV4 NodeCapability = "native-ipv4" ) // SetDNSRequest is a request to add a DNS record. 
From fa374fa852f6b91656c64f07892c554b27b83e49 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Tue, 4 Mar 2025 11:46:05 +0000 Subject: [PATCH 23/87] cmd/testwrapper: Display package-level output Updates tailscale/corp#26861 Signed-off-by: James Sanderson --- cmd/testwrapper/testwrapper.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 1df1ef11f..1501c7e97 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -141,7 +141,7 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te } outcome := goOutput.Action if outcome == "build-fail" { - outcome = "FAIL" + outcome = "fail" } pkgTests[""].logs.WriteString(goOutput.Output) ch <- &testAttempt{ @@ -152,7 +152,15 @@ func runTests(ctx context.Context, attempt int, pt *packageTests, goTestArgs, te logs: pkgTests[""].logs, pkgFinished: true, } + case "output": + // Capture all output from the package except for the final + // "FAIL tailscale.io/control 0.684s" line, as + // printPkgOutcome will output a similar line + if !strings.HasPrefix(goOutput.Output, fmt.Sprintf("FAIL\t%s\t", goOutput.Package)) { + pkgTests[""].logs.WriteString(goOutput.Output) + } } + continue } testName := goOutput.Test @@ -276,7 +284,11 @@ func main() { // when a package times out. failed = true } - os.Stdout.ReadFrom(&tr.logs) + if testingVerbose || tr.outcome == "fail" { + // Output package-level output which is where e.g. 
+ // panics outside tests will be printed + io.Copy(os.Stdout, &tr.logs) + } printPkgOutcome(tr.pkg, tr.outcome, thisRun.attempt, tr.end.Sub(tr.start)) continue } From cae5b97626a44ef425a0b0d3807269fec966a18d Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 4 Mar 2025 07:41:44 -0800 Subject: [PATCH 24/87] cmd/derper: add --home flag to control home page behavior Updates #12897 Change-Id: I7e9c8de0d2daf92cc32e9f6121bc0874c6672540 Signed-off-by: Brad Fitzpatrick --- cmd/derper/derper.go | 52 +++++++++++++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 682ec0bba..221ee0bff 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -63,6 +63,7 @@ var ( hostname = flag.String("hostname", "derp.tailscale.com", "LetsEncrypt host name, if addr's port is :443. When --certmode=manual, this can be an IP address to avoid SNI checks") runSTUN = flag.Bool("stun", true, "whether to run a STUN server. It will bind to the same IP (if any) as the --addr flag value.") runDERP = flag.Bool("derp", true, "whether to run a DERP server. The only reason to set this false is if you're decommissioning a server but want to keep its bootstrap DNS functionality still running.") + flagHome = flag.String("home", "", "what to serve at the root path. It may be left empty (the default, for a default homepage), \"blank\" for a blank page, or a URL to redirect to") meshPSKFile = flag.String("mesh-psk-file", defaultMeshPSKFile(), "if non-empty, path to file containing the mesh pre-shared key file. It should contain some hex string; whitespace is trimmed.") meshWith = flag.String("mesh-with", "", "optional comma-separated list of hostnames to mesh with; the server's own hostname can be in the list. 
If an entry contains a slash, the second part names a hostname to be used when dialing the target.") @@ -254,6 +255,11 @@ func main() { } expvar.Publish("derp", s.ExpVar()) + handleHome, ok := getHomeHandler(*flagHome) + if !ok { + log.Fatalf("unknown --home value %q", *flagHome) + } + mux := http.NewServeMux() if *runDERP { derpHandler := derphttp.Handler(s) @@ -274,19 +280,7 @@ func main() { mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS)) mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tsweb.AddBrowserHeaders(w) - w.Header().Set("Content-Type", "text/html; charset=utf-8") - w.WriteHeader(200) - err := homePageTemplate.Execute(w, templateData{ - ShowAbuseInfo: validProdHostname.MatchString(*hostname), - Disabled: !*runDERP, - AllowDebug: tsweb.AllowDebugAccess(r), - }) - if err != nil { - if r.Context().Err() == nil { - log.Printf("homePageTemplate.Execute: %v", err) - } - return - } + handleHome.ServeHTTP(w, r) })) mux.Handle("/robots.txt", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { tsweb.AddBrowserHeaders(w) @@ -579,3 +573,35 @@ var homePageTemplate = template.Must(template.New("home").Parse(` `)) + +// getHomeHandler returns a handler for the home page based on a flag string +// as documented on the --home flag. 
+func getHomeHandler(val string) (_ http.Handler, ok bool) { + if val == "" { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(200) + err := homePageTemplate.Execute(w, templateData{ + ShowAbuseInfo: validProdHostname.MatchString(*hostname), + Disabled: !*runDERP, + AllowDebug: tsweb.AllowDebugAccess(r), + }) + if err != nil { + if r.Context().Err() == nil { + log.Printf("homePageTemplate.Execute: %v", err) + } + return + } + }), true + } + if val == "blank" { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + w.WriteHeader(200) + }), true + } + if strings.HasPrefix(val, "http://") || strings.HasPrefix(val, "https://") { + return http.RedirectHandler(val, http.StatusFound), true + } + return nil, false +} From 1d2d449b57c7a04766d3da80d9e8ef52abe3ef70 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 11:56:15 +0000 Subject: [PATCH 25/87] .github: Bump actions/cache from 4.2.0 to 4.2.2 Bumps [actions/cache](https://github.com/actions/cache) from 4.2.0 to 4.2.2. - [Release notes](https://github.com/actions/cache/releases) - [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md) - [Commits](https://github.com/actions/cache/compare/1bd1e32a3bdc45362d1e726936510720a7c30a57...d4323d4df104b026a6aa633fdb11d772146be0bf) --- updated-dependencies: - dependency-name: actions/cache dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/test.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7142c86b9..4ff2f2421 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -79,7 +79,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -153,7 +153,7 @@ jobs: cache: false - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -254,7 +254,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -319,7 +319,7 @@ jobs: - name: checkout uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache @@ -367,7 +367,7 @@ jobs: - name: checkout uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Restore Cache - uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0 + uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 with: # Note: unlike the other setups, this is only grabbing the mod download # cache, rather than the whole mod directory, as the download cache From f840aad49e51b457f4d6cacb532313f4d28bfbd1 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 4 Mar 2025 16:17:57 -0800 Subject: [PATCH 26/87] go.toolchain.rev: bump to go1.24.1 (#15209) Bump to 1.24.1 to avail of security fixes. Updates https://github.com/tailscale/tailscale/issues/15015 Signed-off-by: Patrick O'Doherty --- go.toolchain.rev | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.toolchain.rev b/go.toolchain.rev index ddbabb3eb..69aec16e4 100644 --- a/go.toolchain.rev +++ b/go.toolchain.rev @@ -1 +1 @@ -2b494987ff3c1a6a26e10570c490394ff0a77aa4 +4fdaeeb8fe43bcdb4e8cc736433b9cd9c0ddd221 From 3e184345953345ccb69958df355b9bd9fed4ac2e Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 4 Mar 2025 11:22:30 -0800 Subject: [PATCH 27/87] util/eventbus: rework to have a Client abstraction The Client carries both publishers and subscribers for a single actor. This makes the APIs for publish and subscribe look more similar, and this structure is a better fit for upcoming debug facilities. 
Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 103 +++++++++++---------- util/eventbus/bus_test.go | 51 ++++++----- util/eventbus/client.go | 100 ++++++++++++++++++++ util/eventbus/doc.go | 65 ++++++------- util/eventbus/publish.go | 53 +++++------ util/eventbus/subscribe.go | 183 +++++++++++++++++++++---------------- 6 files changed, 346 insertions(+), 209 deletions(-) create mode 100644 util/eventbus/client.go diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 85d73b15e..393596d75 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -20,12 +20,11 @@ type Bus struct { snapshot chan chan []any topicsMu sync.Mutex // guards everything below. - topics map[reflect.Type][]*Queue + topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - publishers set.Set[publisher] - queues set.Set[*Queue] + clients set.Set[*Client] } // New returns a new bus. Use [PublisherOf] to make event publishers, @@ -33,17 +32,53 @@ type Bus struct { func New() *Bus { stopCtl, stopWorker := newGoroutineShutdown() ret := &Bus{ - write: make(chan any), - stop: stopCtl, - snapshot: make(chan chan []any), - topics: map[reflect.Type][]*Queue{}, - publishers: set.Set[publisher]{}, - queues: set.Set[*Queue]{}, + write: make(chan any), + stop: stopCtl, + snapshot: make(chan chan []any), + topics: map[reflect.Type][]*subscribeState{}, + clients: set.Set[*Client]{}, } go ret.pump(stopWorker) return ret } +// Client returns a new client with no subscriptions. Use [Subscribe] +// to receive events, and [Publish] to emit events. +// +// The client's name is used only for debugging, to tell humans what +// piece of code a publisher/subscriber belongs to. Aim for something +// short but unique, for example "kernel-route-monitor" or "taildrop", +// not "watcher". 
+func (b *Bus) Client(name string) *Client { + ret := &Client{ + name: name, + bus: b, + pub: set.Set[publisher]{}, + } + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + b.clients.Add(ret) + return ret +} + +// Close closes the bus. Implicitly closes all clients, publishers and +// subscribers attached to the bus. +// +// Close blocks until the bus is fully shut down. The bus is +// permanently unusable after closing. +func (b *Bus) Close() { + b.stop.StopAndWait() + + var clients set.Set[*Client] + b.topicsMu.Lock() + clients, b.clients = b.clients, set.Set[*Client]{} + b.topicsMu.Unlock() + + for c := range clients { + c.Close() + } +} + func (b *Bus) pump(stop goroutineShutdownWorker) { defer stop.Done() var vals queue @@ -98,13 +133,19 @@ func (b *Bus) pump(stop goroutineShutdownWorker) { } } -func (b *Bus) dest(t reflect.Type) []*Queue { +func (b *Bus) dest(t reflect.Type) []*subscribeState { b.topicsMu.Lock() defer b.topicsMu.Unlock() return b.topics[t] } -func (b *Bus) subscribe(t reflect.Type, q *Queue) (cancel func()) { +func (b *Bus) shouldPublish(t reflect.Type) bool { + b.topicsMu.Lock() + defer b.topicsMu.Unlock() + return len(b.topics[t]) > 0 +} + +func (b *Bus) subscribe(t reflect.Type, q *subscribeState) (cancel func()) { b.topicsMu.Lock() defer b.topicsMu.Unlock() b.topics[t] = append(b.topics[t], q) @@ -113,7 +154,7 @@ func (b *Bus) subscribe(t reflect.Type, q *Queue) (cancel func()) { } } -func (b *Bus) unsubscribe(t reflect.Type, q *Queue) { +func (b *Bus) unsubscribe(t reflect.Type, q *subscribeState) { b.topicsMu.Lock() defer b.topicsMu.Unlock() // Topic slices are accessed by pump without holding a lock, so we @@ -127,44 +168,6 @@ func (b *Bus) unsubscribe(t reflect.Type, q *Queue) { b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1) } -func (b *Bus) Close() { - b.stop.StopAndWait() -} - -// Queue returns a new queue with no subscriptions. Use [Subscribe] to -// atach subscriptions to it. 
-// -// The queue's name should be a short, human-readable string that -// identifies this queue. The name is only visible through debugging -// APIs. -func (b *Bus) Queue(name string) *Queue { - return newQueue(b, name) -} - -func (b *Bus) addQueue(q *Queue) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.queues.Add(q) -} - -func (b *Bus) deleteQueue(q *Queue) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.queues.Delete(q) -} - -func (b *Bus) addPublisher(p publisher) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.publishers.Add(p) -} - -func (b *Bus) deletePublisher(p publisher) { - b.topicsMu.Lock() - defer b.topicsMu.Unlock() - b.publishers.Delete(p) -} - func newGoroutineShutdown() (goroutineShutdownControl, goroutineShutdownWorker) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/util/eventbus/bus_test.go b/util/eventbus/bus_test.go index 180f4164a..e159b6a12 100644 --- a/util/eventbus/bus_test.go +++ b/util/eventbus/bus_test.go @@ -26,14 +26,16 @@ func TestBus(t *testing.T) { b := eventbus.New() defer b.Close() - q := b.Queue("TestBus") - defer q.Close() - s := eventbus.Subscribe[EventA](q) + c := b.Client("TestSub") + defer c.Close() + s := eventbus.Subscribe[EventA](c) go func() { - pa := eventbus.PublisherOf[EventA](b, "TestBusA") + p := b.Client("TestPub") + defer p.Close() + pa := eventbus.Publish[EventA](p) defer pa.Close() - pb := eventbus.PublisherOf[EventB](b, "TestBusB") + pb := eventbus.Publish[EventB](p) defer pb.Close() pa.Publish(EventA{1}) pb.Publish(EventB{2}) @@ -45,7 +47,7 @@ func TestBus(t *testing.T) { select { case got := <-s.Events(): want.Got(got) - case <-q.Done(): + case <-s.Done(): t.Fatalf("queue closed unexpectedly") case <-time.After(time.Second): t.Fatalf("timed out waiting for event") @@ -57,19 +59,21 @@ func TestBusMultipleConsumers(t *testing.T) { b := eventbus.New() defer b.Close() - q1 := b.Queue("TestBusA") - defer q1.Close() - s1 := eventbus.Subscribe[EventA](q1) + c1 := 
b.Client("TestSubA") + defer c1.Close() + s1 := eventbus.Subscribe[EventA](c1) - q2 := b.Queue("TestBusAB") - defer q2.Close() - s2A := eventbus.Subscribe[EventA](q2) - s2B := eventbus.Subscribe[EventB](q2) + c2 := b.Client("TestSubB") + defer c2.Close() + s2A := eventbus.Subscribe[EventA](c2) + s2B := eventbus.Subscribe[EventB](c2) go func() { - pa := eventbus.PublisherOf[EventA](b, "TestBusA") + p := b.Client("TestPub") + defer p.Close() + pa := eventbus.Publish[EventA](p) defer pa.Close() - pb := eventbus.PublisherOf[EventB](b, "TestBusB") + pb := eventbus.Publish[EventB](p) defer pb.Close() pa.Publish(EventA{1}) pb.Publish(EventB{2}) @@ -86,9 +90,11 @@ func TestBusMultipleConsumers(t *testing.T) { wantB.Got(got) case got := <-s2B.Events(): wantB.Got(got) - case <-q1.Done(): + case <-s1.Done(): t.Fatalf("queue closed unexpectedly") - case <-q2.Done(): + case <-s2A.Done(): + t.Fatalf("queue closed unexpectedly") + case <-s2B.Done(): t.Fatalf("queue closed unexpectedly") case <-time.After(time.Second): t.Fatalf("timed out waiting for event") @@ -111,15 +117,15 @@ func TestSpam(t *testing.T) { received := make([][]EventA, subscribers) for i := range subscribers { - q := b.Queue(fmt.Sprintf("Subscriber%d", i)) - defer q.Close() - s := eventbus.Subscribe[EventA](q) + c := b.Client(fmt.Sprintf("Subscriber%d", i)) + defer c.Close() + s := eventbus.Subscribe[EventA](c) g.Go(func() error { for range wantEvents { select { case evt := <-s.Events(): received[i] = append(received[i], evt) - case <-q.Done(): + case <-s.Done(): t.Errorf("queue done before expected number of events received") return errors.New("queue prematurely closed") case <-time.After(5 * time.Second): @@ -134,7 +140,8 @@ func TestSpam(t *testing.T) { published := make([][]EventA, publishers) for i := range publishers { g.Run(func() { - p := eventbus.PublisherOf[EventA](b, fmt.Sprintf("Publisher%d", i)) + c := b.Client(fmt.Sprintf("Publisher%d", i)) + p := eventbus.Publish[EventA](c) for j := range 
eventsPerPublisher { evt := EventA{i*eventsPerPublisher + j} p.Publish(evt) diff --git a/util/eventbus/client.go b/util/eventbus/client.go new file mode 100644 index 000000000..ff8eea6ee --- /dev/null +++ b/util/eventbus/client.go @@ -0,0 +1,100 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "reflect" + "sync" + + "tailscale.com/util/set" +) + +// A Client can publish and subscribe to events on its attached +// bus. See [Publish] to publish events, and [Subscribe] to receive +// events. +// +// Subscribers that share the same client receive events one at a +// time, in the order they were published. +type Client struct { + name string + bus *Bus + + mu sync.Mutex + pub set.Set[publisher] + sub *subscribeState // Lazily created on first subscribe +} + +// Close closes the client. Implicitly closes all publishers and +// subscribers obtained from this client. +func (c *Client) Close() { + var ( + pub set.Set[publisher] + sub *subscribeState + ) + + c.mu.Lock() + pub, c.pub = c.pub, nil + sub, c.sub = c.sub, nil + c.mu.Unlock() + + if sub != nil { + sub.close() + } + for p := range pub { + p.Close() + } +} + +func (c *Client) subscribeState() *subscribeState { + c.mu.Lock() + defer c.mu.Unlock() + if c.sub == nil { + c.sub = newSubscribeState(c) + } + return c.sub +} + +func (c *Client) addPublisher(pub publisher) { + c.mu.Lock() + defer c.mu.Unlock() + c.pub.Add(pub) +} + +func (c *Client) deletePublisher(pub publisher) { + c.mu.Lock() + defer c.mu.Unlock() + c.pub.Delete(pub) +} + +func (c *Client) addSubscriber(t reflect.Type, s *subscribeState) { + c.bus.subscribe(t, s) +} + +func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) { + c.bus.unsubscribe(t, s) +} + +func (c *Client) publish() chan<- any { + return c.bus.write +} + +func (c *Client) shouldPublish(t reflect.Type) bool { + return c.bus.shouldPublish(t) +} + +// Subscribe requests delivery of events of type T through 
the given +// Queue. Panics if the queue already has a subscriber for T. +func Subscribe[T any](c *Client) *Subscriber[T] { + return newSubscriber[T](c.subscribeState()) +} + +// Publisher returns a publisher for event type T using the given +// client. +func Publish[T any](c *Client) *Publisher[T] { + ret := newPublisher[T](c) + c.mu.Lock() + defer c.mu.Unlock() + c.pub.Add(ret) + return ret +} diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index 136823c42..b3509b48b 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -3,56 +3,59 @@ // Package eventbus provides an in-process event bus. // -// The event bus connects publishers of typed events with subscribers -// interested in those events. +// An event bus connects publishers of typed events with subscribers +// interested in those events. Typically, there is one global event +// bus per process. // // # Usage // -// To publish events, use [PublisherOf] to get a typed publisher for -// your event type, then call [Publisher.Publish] as needed. If your -// event is expensive to construct, you can optionally use -// [Publisher.ShouldPublish] to skip the work if nobody is listening -// for the event. +// To send or receive events, first use [Bus.Client] to register with +// the bus. Clients should register with a human-readable name that +// identifies the code using the client, to aid in debugging. // -// To receive events, first use [Bus.Queue] to create an event -// delivery queue, then use [Subscribe] to get a [Subscriber] for each -// event type you're interested in. Receive the events themselves by -// selecting over all your [Subscriber.Chan] channels, as well as -// [Queue.Done] for shutdown notifications. +// To publish events, use [Publish] on a Client to get a typed +// publisher for your event type, then call [Publisher.Publish] as +// needed. 
If your event is expensive to construct, you can optionally +// use [Publisher.ShouldPublish] to skip the work if nobody is +// listening for the event. +// +// To receive events, use [Subscribe] to get a typed subscriber for +// each event type you're interested in. Receive the events themselves +// by selecting over all your [Subscriber.Events] channels, as well as +// [Subscriber.Done] for shutdown notifications. // // # Concurrency properties // -// The bus serializes all published events, and preserves that -// ordering when delivering to subscribers that are attached to the -// same Queue. In more detail: +// The bus serializes all published events across all publishers, and +// preserves that ordering when delivering to subscribers that are +// attached to the same Client. In more detail: // // - An event is published to the bus at some instant between the // start and end of the call to [Publisher.Publish]. -// - Events cannot be published at the same instant, and so are +// - Two events cannot be published at the same instant, and so are // totally ordered by their publication time. Given two events E1 // and E2, either E1 happens before E2, or E2 happens before E1. -// - Queues dispatch events to their Subscribers in publication -// order: if E1 happens before E2, the queue always delivers E1 +// - Clients dispatch events to their Subscribers in publication +// order: if E1 happens before E2, the client always delivers E1 // before E2. -// - Queues do not synchronize with each other: given queues Q1 and -// Q2, both subscribed to events E1 and E2, Q1 may deliver both E1 -// and E2 before Q2 delivers E1. +// - Clients do not synchronize subscriptions with each other: given +// clients C1 and C2, both subscribed to events E1 and E2, C1 may +// deliver both E1 and E2 before C2 delivers E1. // // Less formally: there is one true timeline of all published events. 
-// If you make a Queue and subscribe to events on it, you will receive -// those events one at a time, in the same order as the one true +// If you make a Client and subscribe to events, you will receive +// events one at a time, in the same order as the one true // timeline. You will "skip over" events you didn't subscribe to, but // your view of the world always moves forward in time, never // backwards, and you will observe events in the same order as // everyone else. // -// However, you cannot assume that what your subscribers on your queue -// see as "now" is the same as what other subscribers on other -// queues. Their queue may be further behind you in the timeline, or -// running ahead of you. This means you should be careful about -// reaching out to another component directly after receiving an -// event, as its view of the world may not yet (or ever) be exactly -// consistent with yours. +// However, you cannot assume that what your client see as "now" is +// the same as what other clients. They may be further behind you in +// working through the timeline, or running ahead of you. This means +// you should be careful about reaching out to another component +// directly after receiving an event, as its view of the world may not +// yet (or ever) be exactly consistent with yours. // // To make your code more testable and understandable, you should try // to structure it following the actor model: you have some local @@ -63,7 +66,7 @@ // # Expected subscriber behavior // // Subscribers are expected to promptly receive their events on -// [Subscriber.Chan]. The bus has a small, fixed amount of internal +// [Subscriber.Events]. The bus has a small, fixed amount of internal // buffering, meaning that a slow subscriber will eventually cause // backpressure and block publication of all further events. 
// diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 14828812b..19ddc1256 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -11,35 +11,41 @@ import ( // publisher is a uniformly typed wrapper around Publisher[T], so that // debugging facilities can look at active publishers. type publisher interface { - publisherName() string + publishType() reflect.Type + Close() } -// A Publisher publishes events on the bus. +// A Publisher publishes typed events on a bus. type Publisher[T any] struct { - bus *Bus - name string + client *Client stopCtx context.Context stop context.CancelFunc } -// PublisherOf returns a publisher for event type T on the given bus. -// -// The publisher's name should be a short, human-readable string that -// identifies this event publisher. The name is only visible through -// debugging APIs. -func PublisherOf[T any](b *Bus, name string) *Publisher[T] { +func newPublisher[T any](c *Client) *Publisher[T] { ctx, cancel := context.WithCancel(context.Background()) ret := &Publisher[T]{ - bus: b, - name: name, + client: c, stopCtx: ctx, stop: cancel, } - b.addPublisher(ret) + c.addPublisher(ret) return ret } -func (p *Publisher[T]) publisherName() string { return p.name } +// Close closes the publisher. +// +// Calls to Publish after Close silently do nothing. +func (p *Publisher[T]) Close() { + // Just unblocks any active calls to Publish, no other + // synchronization needed. + p.stop() + p.client.deletePublisher(p) +} + +func (p *Publisher[T]) publishType() reflect.Type { + return reflect.TypeFor[T]() +} // Publish publishes event v on the bus. 
func (p *Publisher[T]) Publish(v T) { @@ -48,32 +54,21 @@ func (p *Publisher[T]) Publish(v T) { select { case <-p.stopCtx.Done(): return - case <-p.bus.stop.WaitChan(): - return default: } select { - case p.bus.write <- v: + case p.client.publish() <- v: case <-p.stopCtx.Done(): - case <-p.bus.stop.WaitChan(): } } -// ShouldPublish reports whether anyone is subscribed to events of -// type T. +// ShouldPublish reports whether anyone is subscribed to the events +// that this publisher emits. // // ShouldPublish can be used to skip expensive event construction if // nobody seems to care. Publishers must not assume that someone will // definitely receive an event if ShouldPublish returns true. func (p *Publisher[T]) ShouldPublish() bool { - dests := p.bus.dest(reflect.TypeFor[T]()) - return len(dests) > 0 -} - -// Close closes the publisher, indicating that no further events will -// be published with it. -func (p *Publisher[T]) Close() { - p.stop() - p.bus.deletePublisher(p) + return p.client.shouldPublish(reflect.TypeFor[T]()) } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index ade834d77..896f0ce1f 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -4,46 +4,59 @@ package eventbus import ( + "context" "fmt" "reflect" "sync" ) -type dispatchFn func(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool +// subscriber is a uniformly typed wrapper around Subscriber[T], so +// that debugging facilities can look at active subscribers. +type subscriber interface { + subscribeType() reflect.Type + // dispatch is a function that dispatches the head value in vals to + // a subscriber, while also handling stop and incoming queue write + // events. + // + // dispatch exists because of the strongly typed Subscriber[T] + // wrapper around subscriptions: within the bus events are boxed in an + // 'any', and need to be unpacked to their full type before delivery + // to the subscriber. 
This involves writing to a strongly-typed + // channel, so subscribeState cannot handle that dispatch by itself - + // but if that strongly typed send blocks, we also need to keep + // processing other potential sources of wakeups, which is how we end + // up at this awkward type signature and sharing of internal state + // through dispatch. + dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool + Close() +} -// A Queue receives events from a Bus. -// -// To receive events through the queue, see [Subscribe]. Subscribers -// that share the same Queue receive events one at time, in the order -// they were published. -type Queue struct { - bus *Bus - name string +// subscribeState handles dispatching of events received from a Bus. +type subscribeState struct { + client *Client write chan any stop goroutineShutdownControl snapshot chan chan []any outputsMu sync.Mutex - outputs map[reflect.Type]dispatchFn + outputs map[reflect.Type]subscriber } -func newQueue(b *Bus, name string) *Queue { +func newSubscribeState(c *Client) *subscribeState { stopCtl, stopWorker := newGoroutineShutdown() - ret := &Queue{ - bus: b, - name: name, + ret := &subscribeState{ + client: c, write: make(chan any), stop: stopCtl, snapshot: make(chan chan []any), - outputs: map[reflect.Type]dispatchFn{}, + outputs: map[reflect.Type]subscriber{}, } - b.addQueue(ret) go ret.pump(stopWorker) return ret } -func (q *Queue) pump(stop goroutineShutdownWorker) { +func (q *subscribeState) pump(stop goroutineShutdownWorker) { defer stop.Done() var vals queue acceptCh := func() chan any { @@ -55,13 +68,13 @@ func (q *Queue) pump(stop goroutineShutdownWorker) { for { if !vals.Empty() { val := vals.Peek() - fn := q.dispatchFn(val) - if fn == nil { + sub := q.subscriberFor(val) + if sub == nil { // Raced with unsubscribe. 
vals.Drop() continue } - if !fn(&vals, stop, acceptCh) { + if !sub.dispatch(&vals, stop, acceptCh) { return } } else { @@ -81,16 +94,74 @@ func (q *Queue) pump(stop goroutineShutdownWorker) { } } -// A Subscriber delivers one type of event from a [Queue]. +func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + if s.outputs[t] != nil { + panic(fmt.Errorf("double subscription for event %s", t)) + } + s.outputs[t] = sub + s.client.addSubscriber(t, s) +} + +func (s *subscribeState) deleteSubscriber(t reflect.Type) { + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + delete(s.outputs, t) + s.client.deleteSubscriber(t, s) +} + +func (q *subscribeState) subscriberFor(val any) subscriber { + q.outputsMu.Lock() + defer q.outputsMu.Unlock() + return q.outputs[reflect.TypeOf(val)] +} + +// Close closes the subscribeState. Implicitly closes all Subscribers +// linked to this state, and any pending events are discarded. +func (s *subscribeState) close() { + s.stop.StopAndWait() + + var subs map[reflect.Type]subscriber + s.outputsMu.Lock() + subs, s.outputs = s.outputs, nil + s.outputsMu.Unlock() + for _, sub := range subs { + sub.Close() + } +} + +// A Subscriber delivers one type of event from a [Client]. 
type Subscriber[T any] struct { - recv *Queue - read chan T + doneCtx context.Context + done context.CancelFunc + recv *subscribeState + read chan T +} + +func newSubscriber[T any](r *subscribeState) *Subscriber[T] { + t := reflect.TypeFor[T]() + + ctx, cancel := context.WithCancel(context.Background()) + ret := &Subscriber[T]{ + doneCtx: ctx, + done: cancel, + recv: r, + read: make(chan T), + } + r.addSubscriber(t, ret) + + return ret +} + +func (s *Subscriber[T]) subscribeType() reflect.Type { + return reflect.TypeFor[T]() } func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool { t := vals.Peek().(T) for { - // Keep the cases in this select in sync with Queue.pump + // Keep the cases in this select in sync with subscribeState.pump // above. The only different should be that this select // delivers a value on s.read. select { @@ -113,58 +184,16 @@ func (s *Subscriber[T]) Events() <-chan T { return s.read } -// Close shuts down the Subscriber, indicating the caller no longer -// wishes to receive these events. After Close, receives on -// [Subscriber.Chan] block for ever. +// Done returns a channel that is closed when the subscriber is +// closed. +func (s *Subscriber[T]) Done() <-chan struct{} { + return s.doneCtx.Done() +} + +// Close closes the Subscriber, indicating the caller no longer wishes +// to receive this event type. After Close, receives on +// [Subscriber.Events] block for ever. 
func (s *Subscriber[T]) Close() { - t := reflect.TypeFor[T]() - s.recv.bus.unsubscribe(t, s.recv) - s.recv.deleteDispatchFn(t) -} - -func (q *Queue) dispatchFn(val any) dispatchFn { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - return q.outputs[reflect.ValueOf(val).Type()] -} - -func (q *Queue) addDispatchFn(t reflect.Type, fn dispatchFn) { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - if q.outputs[t] != nil { - panic(fmt.Errorf("double subscription for event %s", t)) - } - q.outputs[t] = fn -} - -func (q *Queue) deleteDispatchFn(t reflect.Type) { - q.outputsMu.Lock() - defer q.outputsMu.Unlock() - delete(q.outputs, t) -} - -// Done returns a channel that is closed when the Queue is closed. -func (q *Queue) Done() <-chan struct{} { - return q.stop.WaitChan() -} - -// Close closes the queue. All Subscribers attached to the queue are -// implicitly closed, and any pending events are discarded. -func (q *Queue) Close() { - q.stop.StopAndWait() - q.bus.deleteQueue(q) -} - -// Subscribe requests delivery of events of type T through the given -// Queue. Panics if the queue already has a subscriber for T. 
-func Subscribe[T any](r *Queue) Subscriber[T] { - t := reflect.TypeFor[T]() - ret := Subscriber[T]{ - recv: r, - read: make(chan T), - } - r.addDispatchFn(t, ret.dispatch) - r.bus.subscribe(t, r) - - return ret + s.done() // unblock receivers + s.recv.deleteSubscriber(reflect.TypeFor[T]()) } From 5eafce7e25b100f4dd235f5256607fe11727e843 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 7 Feb 2025 20:25:43 -0800 Subject: [PATCH 28/87] gokrazy/natlab: update gokrazy, wire up natlab tests to GitHub CI Updates #13038 Change-Id: I610f9076816f44d59c0ca405a1b4f5eb4c6c0594 Signed-off-by: Brad Fitzpatrick --- .github/workflows/natlab-integrationtest.yml | 30 ++++++++ gokrazy/build.go | 40 ++++++++-- gokrazy/go.mod | 12 +-- gokrazy/go.sum | 12 +-- .../builddir/tailscale.com/go.sum | 70 ++++++++++++++++++ gokrazy/natlabapp.arm64/config.json | 4 + .../natlabapp/builddir/tailscale.com/go.sum | 68 +++++++++++++++++ gokrazy/natlabapp/config.json | 4 + gokrazy/tsapp/builddir/tailscale.com/go.sum | 74 +++++++++++++++++++ gokrazy/tsapp/config.json | 4 + tstest/integration/nat/nat_test.go | 20 ++++- 11 files changed, 318 insertions(+), 20 deletions(-) create mode 100644 .github/workflows/natlab-integrationtest.yml diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml new file mode 100644 index 000000000..b8d99e668 --- /dev/null +++ b/.github/workflows/natlab-integrationtest.yml @@ -0,0 +1,30 @@ +# Run some natlab integration tests. 
+# See https://github.com/tailscale/tailscale/issues/13038 +name: "natlab-integrationtest" + +concurrency: + group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + pull_request: + paths: + - "tailcfg/**" + - "wgengine/**" + - "ipn/ipnlocal/**" + - ".github/workflows/natlab-integrationtest.yml" +jobs: + natlab-integrationtest: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Install qemu + run: | + sudo rm /var/lib/man-db/auto-update + sudo apt-get -y update + sudo apt-get -y remove man-db + sudo apt-get install -y qemu-system-x86 qemu-utils + - name: Run natlab integration tests + run: | + ./tool/go test -v -run=^TestEasyEasy$ -timeout=3m -count=1 ./tstest/integration/nat --run-vm-tests diff --git a/gokrazy/build.go b/gokrazy/build.go index 2392af0cb..c1ee1cbeb 100644 --- a/gokrazy/build.go +++ b/gokrazy/build.go @@ -11,7 +11,6 @@ package main import ( "bytes" - "cmp" "encoding/json" "errors" "flag" @@ -30,7 +29,6 @@ import ( var ( app = flag.String("app", "tsapp", "appliance name; one of the subdirectories of gokrazy/") bucket = flag.String("bucket", "tskrazy-import", "S3 bucket to upload disk image to while making AMI") - goArch = flag.String("arch", cmp.Or(os.Getenv("GOARCH"), "amd64"), "GOARCH architecture to build for: arm64 or amd64") build = flag.Bool("build", false, "if true, just build locally and stop, without uploading") ) @@ -54,6 +52,26 @@ func findMkfsExt4() (string, error) { return "", errors.New("No mkfs.ext4 found on system") } +var conf gokrazyConfig + +// gokrazyConfig is the subset of gokrazy/internal/config.Struct +// that we care about. +type gokrazyConfig struct { + // Environment is os.Environment pairs to use when + // building userspace. 
+ // See https://gokrazy.org/userguide/instance-config/#environment + Environment []string +} + +func (c *gokrazyConfig) GOARCH() string { + for _, e := range c.Environment { + if v, ok := strings.CutPrefix(e, "GOARCH="); ok { + return v + } + } + return "" +} + func main() { flag.Parse() @@ -61,6 +79,19 @@ func main() { log.Fatalf("--app must be non-empty name such as 'tsapp' or 'natlabapp'") } + confJSON, err := os.ReadFile(filepath.Join(*app, "config.json")) + if err != nil { + log.Fatalf("reading config.json: %v", err) + } + if err := json.Unmarshal(confJSON, &conf); err != nil { + log.Fatalf("unmarshaling config.json: %v", err) + } + switch conf.GOARCH() { + case "amd64", "arm64": + default: + log.Fatalf("config.json GOARCH %q must be amd64 or arm64", conf.GOARCH()) + } + if err := buildImage(); err != nil { log.Fatalf("build image: %v", err) } @@ -106,7 +137,6 @@ func buildImage() error { // Build the tsapp.img var buf bytes.Buffer cmd := exec.Command("go", "run", - "-exec=env GOOS=linux GOARCH="+*goArch+" ", "github.com/gokrazy/tools/cmd/gok", "--parent_dir="+dir, "--instance="+*app, @@ -253,13 +283,13 @@ func waitForImportSnapshot(importTaskID string) (snapID string, err error) { func makeAMI(name, ebsSnapID string) (ami string, err error) { var arch string - switch *goArch { + switch conf.GOARCH() { case "arm64": arch = "arm64" case "amd64": arch = "x86_64" default: - return "", fmt.Errorf("unknown arch %q", *goArch) + return "", fmt.Errorf("unknown arch %q", conf.GOARCH()) } out, err := exec.Command("aws", "ec2", "register-image", "--name", name, diff --git a/gokrazy/go.mod b/gokrazy/go.mod index a9ba5a07d..f7483f41d 100644 --- a/gokrazy/go.mod +++ b/gokrazy/go.mod @@ -1,13 +1,13 @@ module tailscale.com/gokrazy -go 1.23.1 +go 1.23 -require github.com/gokrazy/tools v0.0.0-20240730192548-9f81add3a91e +require github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c require ( github.com/breml/rootcerts v0.2.10 // indirect 
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 // indirect - github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 // indirect + github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 // indirect github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 // indirect github.com/google/renameio/v2 v2.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -15,9 +15,5 @@ require ( github.com/spf13/pflag v1.0.5 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.20.0 // indirect + golang.org/x/sys v0.28.0 // indirect ) - -replace github.com/gokrazy/gokrazy => github.com/tailscale/gokrazy v0.0.0-20240812224643-6b21ddf64678 - -replace github.com/gokrazy/tools => github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e diff --git a/gokrazy/go.sum b/gokrazy/go.sum index dfac8ca37..170d15b3d 100644 --- a/gokrazy/go.sum +++ b/gokrazy/go.sum @@ -3,8 +3,10 @@ github.com/breml/rootcerts v0.2.10/go.mod h1:24FDtzYMpqIeYC7QzaE8VPRQaFZU5TIUDly github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 h1:C7t6eeMaEQVy6e8CarIhscYQlNmw5e3G36y7l7Y21Ao= github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5 h1:XDklMxV0pE5jWiNaoo5TzvWfqdoiRRScmr4ZtDzE4Uw= -github.com/gokrazy/internal v0.0.0-20240629150625-a0f1dee26ef5/go.mod h1:t3ZirVhcs9bH+fPAJuGh51rzT7sVCZ9yfXvszf0ZjF0= +github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57 h1:f5bEvO4we3fbfiBkECrrUgWQ8OH6J3SdB2Dwxid/Yx4= +github.com/gokrazy/internal v0.0.0-20250126213949-423a5b587b57/go.mod h1:SJG1KwuJQXFEoBgryaNCkMbdISyovDgZd0xmXJRZmiw= +github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c h1:iEbS8GrNOn671ze8J/AfrYFEVzf8qMx8aR5K0VxPK2w= 
+github.com/gokrazy/tools v0.0.0-20250128200151-63160424957c/go.mod h1:f2vZhnaPzy92+Bjpx1iuZHK7VuaJx6SNCWQWmu23HZA= github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2 h1:kBY5R1tSf+EYZ+QaSrofLaVJtBqYsVNVBWkdMq3Smcg= github.com/gokrazy/updater v0.0.0-20230215172637-813ccc7f21e2/go.mod h1:PYOvzGOL4nlBmuxu7IyKQTFLaxr61+WPRNRzVtuYOHw= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= @@ -19,14 +21,12 @@ github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e h1:3/xIc1QCvnKL7BCLng9od98HEvxCadjvqiI/bN+Twso= -github.com/tailscale/gokrazy-tools v0.0.0-20240730192548-9f81add3a91e/go.mod h1:eTZ0QsugEPFU5UAQ/87bKMkPxQuTNa7+iFAIahOFwRg= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum 
b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum index 9123439ed..ae814f316 100644 --- a/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum +++ b/gokrazy/natlabapp.arm64/builddir/tailscale.com/go.sum @@ -4,32 +4,58 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod 
h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go 
v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= @@ -46,10 +72,14 @@ github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -62,6 +92,8 @@ github.com/google/uuid 
v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= @@ -70,6 +102,8 @@ github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwso github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= github.com/illarion/gonotify/v2 v2.0.2 h1:oDH5yvxq9oiQGWUeut42uShcWzOy/hsT9E7pvO95+kQ= github.com/illarion/gonotify/v2 v2.0.2/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= +github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= +github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= @@ -84,6 +118,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= 
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -96,6 +132,8 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -126,12 +164,18 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= 
+github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= @@ -144,6 +188,8 @@ github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= 
github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -152,42 +198,66 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a 
h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod 
h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/gokrazy/natlabapp.arm64/config.json b/gokrazy/natlabapp.arm64/config.json index 2577f61a5..2ba9a20f9 100644 --- a/gokrazy/natlabapp.arm64/config.json +++ b/gokrazy/natlabapp.arm64/config.json @@ -20,6 +20,10 @@ } } }, + "Environment": [ + "GOOS=linux", + "GOARCH=arm64" + ], "KernelPackage": "github.com/gokrazy/kernel.arm64", "FirmwarePackage": "github.com/gokrazy/kernel.arm64", "EEPROMPackage": "", diff --git a/gokrazy/natlabapp/builddir/tailscale.com/go.sum b/gokrazy/natlabapp/builddir/tailscale.com/go.sum index baa378c46..25f15059d 100644 --- a/gokrazy/natlabapp/builddir/tailscale.com/go.sum +++ b/gokrazy/natlabapp/builddir/tailscale.com/go.sum @@ -4,32 +4,58 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config 
v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= 
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= 
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo= @@ -46,10 +72,14 @@ github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json 
v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -62,6 +92,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= @@ -86,6 +118,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod 
h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -98,6 +132,8 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -128,14 +164,20 @@ github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4 github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred 
v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc h1:cezaQN9pvKVaw56Ma5qr/G646uKIYP0yQf+OyWN/okc= github.com/tailscale/wireguard-go v0.0.0-20240905161824-799c1978fafc/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= @@ -148,6 +190,8 @@ github.com/u-root/u-root v0.12.0 
h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -156,42 +200,66 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto 
v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= k8s.io/client-go 
v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/gokrazy/natlabapp/config.json b/gokrazy/natlabapp/config.json index 902f14acd..1968b2aac 100644 --- a/gokrazy/natlabapp/config.json +++ b/gokrazy/natlabapp/config.json @@ -20,6 +20,10 @@ } } }, + "Environment": [ + "GOOS=linux", + "GOARCH=amd64" + ], "KernelPackage": "github.com/tailscale/gokrazy-kernel", "FirmwarePackage": "", "EEPROMPackage": "", diff --git a/gokrazy/tsapp/builddir/tailscale.com/go.sum b/gokrazy/tsapp/builddir/tailscale.com/go.sum index b3b73e2d0..2ffef7bf7 100644 --- a/gokrazy/tsapp/builddir/tailscale.com/go.sum +++ b/gokrazy/tsapp/builddir/tailscale.com/go.sum @@ -4,48 +4,80 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG7fk= +github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/config v1.26.5 h1:lodGSevz7d+kkFJodfauThRxK9mdJbyutUxGq1NNhvw= github.com/aws/aws-sdk-go-v2/config v1.26.5/go.mod 
h1:DxHrz6diQJOc9EwDslVRh84VjjrE17g+pVZXUeSxaDU= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31/go.mod h1:Huu6GG0YTfbPphQkDSo4dEGmQRTKb9k9G7RdtyQWxuI= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 h1:ACxDklUKKXb48+eg5ROZXi1vDgfMyfIA/WyvqHcHI0o= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31/go.mod h1:yadnfsDwqXeVaohbGc/RaD287PuyRw2wugkh5ZL2J6k= 
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rGjfihBewr5bT+QUfYUHIxCVgG61LHoT59shM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7 h1:a8HvP/+ew3tKwSXqL3BCSjiuicr+XTU2eFYeogV9GJE= github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7/go.mod h1:Q7XIWsMo0JcMpI/6TGD6XXcXcV1DbTj6e9BKNntIMIM= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= +github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE= github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0= github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0= github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= +github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e 
h1:vUmf0yezR0y7jJ5pceLHthLaYf4bA5T14B6q39S4q2Q= github.com/digitalocean/go-smbios v0.0.0-20180907143718-390a4f403a8e/go.mod h1:YTIHhz/QFSYnu/EhlF2SpU2Uk+32abacUYA5ZPljz1A= github.com/djherbis/times v1.6.0 h1:w2ctJ92J8fBvWPxugmXIv7Nz7Q3iDMKNx9v5ocVH20c= github.com/djherbis/times v1.6.0/go.mod h1:gOHeRAz2h+VJNZ5Gmc/o7iD9k4wW7NMVqieYCY99oc0= github.com/fxamacker/cbor/v2 v2.6.0 h1:sU6J2usfADwWlYDAFhZBQ6TnLFBHxgesMrQfQgk1tWA= github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc= github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0 h1:ymLjT4f35nQbASLnvxEde4XOBL+Sn7rFuV+FOJqkljg= github.com/go-json-experiment/json v0.0.0-20231102232822-2e55bd4e08b0/go.mod h1:6daplAwHHGbUGib4990V3Il26O0OC4aRyvewaaAihaA= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288 h1:KbX3Z3CgiYlbaavUq3Cj9/MjpO+88S7/AGXzynVDv84= +github.com/go-json-experiment/json v0.0.0-20250103232110-6a9a0fde9288/go.mod h1:BWmvoE1Xia34f3l/ibJweyhrT+aROb/FQ6d+37F0e2s= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 h1:sQspH8M4niEijh3PFscJRLDnkL547IeP7kpPe3uUhEg= github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466/go.mod h1:ZiQxhyQ+bbbfxUKVvjfO498oPYvtYhZzycal3G/NHmU= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -58,12 +90,16 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/csrf v1.7.2 h1:oTUjx0vyf2T+wkrx09Trsev1TE+/EbDAeHtSTbtC2eI= github.com/gorilla/csrf v1.7.2/go.mod 
h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30 h1:fiJdrgVBkjZ5B1HJ2WQwNOaXB+QyYcNXTA3t1XYLz0M= +github.com/gorilla/csrf v1.7.3-0.20250123201450-9dd6af1f6d30/go.mod h1:F1Fj3KG23WYHE6gozCmBAezKookxbIvUJT+121wTuLk= github.com/gorilla/securecookie v1.1.2 h1:YCIWL56dvtr73r6715mJs5ZvhtnY73hBvEF8kXD8ePA= github.com/gorilla/securecookie v1.1.2/go.mod h1:NfCASbcHqRSY+3a8tlWJwsQap2VX5pwzwo4h3eOamfo= github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/illarion/gonotify v1.0.1 h1:F1d+0Fgbq/sDWjj/r66ekjDG+IDeecQKUFH4wNwsoio= github.com/illarion/gonotify v1.0.1/go.mod h1:zt5pmDofZpU1f8aqlK0+95eQhoEAn/d4G4B/FjVW4jE= +github.com/illarion/gonotify/v2 v2.0.3 h1:B6+SKPo/0Sw8cRJh1aLzNEeNVFfzE3c6N+o+vyxM+9A= +github.com/illarion/gonotify/v2 v2.0.3/go.mod h1:38oIJTgFqupkEydkkClkbL6i5lXV/bxdH9do5TALPEE= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2 h1:9K06NfxkBh25x56yVhWWlKFE8YpicaSfHwoV8SFbueA= github.com/insomniacslk/dhcp v0.0.0-20231206064809-8c70d406f6d2/go.mod h1:3A9PQ1cunSDF/1rbTq99Ts4pVnycWg+vlPkfeD2NLFI= github.com/jellydator/ttlcache/v3 v3.1.0 h1:0gPFG0IHHP6xyUyXq+JaD8fwkDCqgqwohXNJBcYE71g= @@ -78,6 +114,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a 
h1:+RR6SqnTkDLWyICxS1xpjCi/3dhyV+TgZwA6Ww3KncQ= github.com/kortschak/wol v0.0.0-20200729010619-da482cc4850a/go.mod h1:YTtCCM3ryyfiu4F7t8HQ1mxvp1UBdWM2r6Xa+nGWvDk= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -90,6 +128,8 @@ github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy5 github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42 h1:A1Cq6Ysb0GM0tpKMbdCXCIfBclan4oHk1Jb+Hrejirg= +github.com/mdlayher/netlink v1.7.3-0.20250113171957-fbb4dce95f42/go.mod h1:BB4YCPDOzfy7FniQ/lxuYQ3dgmM2cZumHbK8RpTjN2o= github.com/mdlayher/sdnotify v1.0.0 h1:Ma9XeLVN/l0qpyx1tNeMSeTjCPH6NtuD6/N9XdTlQ3c= github.com/mdlayher/sdnotify v1.0.0/go.mod h1:HQUmpM4XgYkhDLtd+Uad8ZFK1T9D5+pNxnXQjCeJlGE= github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI= @@ -116,14 +156,22 @@ github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a h1:SJy1Pu0eH1C29X github.com/tailscale/hujson v0.0.0-20221223112325-20486734a56a/go.mod h1:DFSS3NAGHthKo1gTlmEcSBiZrRJXi28rLNd/1udP1c8= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85 h1:zrsUcqrG2uQSPhaUPjUQwozcRdDdSxxqhNgNZ3drZFk= github.com/tailscale/netlink v1.1.1-0.20211101221916-cabfb018fe85/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 h1:uFsXVBE9Qr4ZoF094vE6iYTLDl0qCiKzYXlL6UeWObU= +github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7/go.mod h1:NzVQi3Mleb+qzq8VmcWpSkcSYxXIg0DkI6XDzpVkhJ0= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4 h1:Gz0rz40FvFVLTBk/K8UNAenb36EbDSnh+q7Z9ldcC8w= github.com/tailscale/peercred v0.0.0-20240214030740-b535050b2aa4/go.mod 
h1:phI29ccmHQBc+wvroosENp1IF9195449VDnFDhJ4rJU= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc h1:24heQPtnFR+yfntqhI3oAu9i27nEojcQ4NuBQOo5ZFA= +github.com/tailscale/peercred v0.0.0-20250107143737-35a0c7bd7edc/go.mod h1:f93CXfllFsO9ZQVq+Zocb1Gp4G5Fz0b0rXHLOzt/Djc= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1 h1:tdUdyPqJ0C97SJfjB9tW6EylTtreyee9C44de+UBG0g= github.com/tailscale/web-client-prebuilt v0.0.0-20240226180453-5db17b287bf1/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976 h1:UBPHPtv8+nEAy2PD8RyAhOYvau1ek0HDJqLS/Pysi14= +github.com/tailscale/web-client-prebuilt v0.0.0-20250124233751-d4cd19a26976/go.mod h1:agQPE6y6ldqCOui2gkIh7ZMztTkIQKH049tv8siLuNQ= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1 h1:ycpNCSYwzZ7x4G4ioPNtKQmIY0G/3o4pVf8wCZq6blY= github.com/tailscale/wireguard-go v0.0.0-20240705152531-2f5d148bcfe1/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98 h1:RNpJrXfI5u6e+uzyIzvmnXbhmhdRkVf//90sMBH3lso= github.com/tailscale/wireguard-go v0.0.0-20240731203015-71393c576b98/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19 h1:BcEJP2ewTIK2ZCsqgl6YGpuO6+oKqqag5HHb7ehljKw= +github.com/tailscale/wireguard-go v0.0.0-20250107165329-0b8b35511f19/go.mod h1:BOm5fXUBFM+m9woLNBoxI9TaBXXhGNP50LX/TGIvGb4= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9 h1:81P7rjnikHKTJ75EkjppvbwUfKHDHYk6LJpO5PZy8pA= github.com/tailscale/xnet v0.0.0-20240117122442-62b9a7c569f9/go.mod h1:orPd6JZXXRyuDusYilywte7k094d7dycXXU5YnWsrwg= github.com/tailscale/xnet v0.0.0-20240729143630-8497ac4dab2e h1:zOGKqN5D5hHhiYUp091JqK7DPCqSARyUfduhGUY8Bek= @@ -136,6 +184,8 @@ github.com/u-root/u-root v0.12.0 h1:K0AuBFriwr0w/PGS3HawiAw89e3+MU7ks80GpghAsNs= github.com/u-root/u-root v0.12.0/go.mod 
h1:FYjTOh4IkIZHhjsd17lb8nYW6udgXdJhG1c0r6u0arI= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e h1:BA9O3BmlTmpjbvajAwzWx4Wo2TRVdpPXZEeemGQcajw= github.com/u-root/uio v0.0.0-20240118234441-a3c409a6018e/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8RaCKgVpHZnecvArXvPXcFkM= +github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= @@ -144,42 +194,66 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= go4.org/mem v0.0.0-20220726221520-4f986261bf13 h1:CbZeCBZ0aZj8EfVgnqQcYZgf0lpZ3H9rmp5nkDTAst8= go4.org/mem v0.0.0-20220726221520-4f986261bf13/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745 h1:Tl++JLUCe4sxGu8cTpDzRLd3tN7US4hOxG5YpKCzkek= +go4.org/mem v0.0.0-20240501181205-ae6ca9944745/go.mod h1:reUoABIJ9ikfM5sgtSF3Wushcza7+WeD01VB9Lirh3g= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.32.1-0.20250118192723-a8ea4be81f07 h1:Z+Zg+aXJYq6f4TK2E4H+vZkQ4dJAWnInXDR6hM9znxo= +golang.org/x/crypto 
v0.32.1-0.20250118192723-a8ea4be81f07/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab 
h1:BMkEEWYOjkvOX7+YKOGbp6jCyQ5pR2j0Ah47p1Vdsx4= +golang.org/x/sys v0.29.1-0.20250107080300-1c14dcadc3ab/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3 h1:/8/t5pz/mgdRXhYOIeqqYhFAQLE4DDGegc0Y4ZjyFJM= gvisor.dev/gvisor v0.0.0-20240306221502-ee1e1f6070e3/go.mod h1:NQHVAzMwvZ+Qe3ElSiHmq9RUm1MdNHpUZ52fiEqvn+0= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlLcNHzZu3x6mhoH2Mk0c8= gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633 h1:2gap+Kh/3F47cO6hAu3idFvsJ0ue6TRcEi2IUkv/F8k= +gvisor.dev/gvisor v0.0.0-20250205023644-9414b50a5633/go.mod h1:5DMfjtclAbTIjbXqO1qCe2K5GKKxWz2JHvCChuTcJEM= k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q= k8s.io/client-go v0.30.1/go.mod 
h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/gokrazy/tsapp/config.json b/gokrazy/tsapp/config.json index 33dd98a96..b88be53a4 100644 --- a/gokrazy/tsapp/config.json +++ b/gokrazy/tsapp/config.json @@ -27,6 +27,10 @@ } } }, + "Environment": [ + "GOOS=linux", + "GOARCH=amd64" + ], "KernelPackage": "github.com/tailscale/gokrazy-kernel", "FirmwarePackage": "github.com/tailscale/gokrazy-kernel", "InternalCompatibilityFlags": {} diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 535515588..9f77d31e9 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -32,6 +32,7 @@ import ( ) var ( + runVMTests = flag.Bool("run-vm-tests", false, "run tests that require a VM") logTailscaled = flag.Bool("log-tailscaled", false, "log tailscaled output") pcapFile = flag.String("pcap", "", "write pcap to file") ) @@ -59,8 +60,25 @@ func newNatTest(tb testing.TB) *natTest { base: filepath.Join(modRoot, "gokrazy/natlabapp.qcow2"), } + if !*runVMTests { + tb.Skip("skipping heavy test; set --run-vm-tests to run") + } + if _, err := os.Stat(nt.base); err != nil { - tb.Skipf("skipping test; base image %q not found", nt.base) + if !os.IsNotExist(err) { + tb.Fatal(err) + } + tb.Logf("building VM image...") + cmd := exec.Command("make", "natlab") + cmd.Dir = filepath.Join(modRoot, "gokrazy") + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stdout + if err := cmd.Run(); err != nil { + 
tb.Fatalf("Error running 'make natlab' in gokrazy directory") + } + if _, err := os.Stat(nt.base); err != nil { + tb.Skipf("still can't find VM image: %v", err) + } } nt.kernel, err = findKernelPath(filepath.Join(modRoot, "gokrazy/natlabapp/builddir/github.com/tailscale/gokrazy-kernel/go.mod")) From 24d4846f007d34b160e2dba9fecf95a8357372d7 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Tue, 4 Mar 2025 12:08:32 -0800 Subject: [PATCH 29/87] util/eventbus: adjust worker goroutine management helpers This makes the helpers closer in behavior to cancelable contexts and taskgroup.Single, and makes the worker code use a more normal and easier to reason about context.Context for shutdown. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 143 ++++++++++++++++++++++--------------- util/eventbus/publish.go | 17 ++--- util/eventbus/subscribe.go | 47 ++++++------ 3 files changed, 113 insertions(+), 94 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 393596d75..3520be828 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -15,8 +15,8 @@ import ( // Bus is an event bus that distributes published events to interested // subscribers. type Bus struct { + router *worker write chan any - stop goroutineShutdownControl snapshot chan chan []any topicsMu sync.Mutex // guards everything below. @@ -30,15 +30,13 @@ type Bus struct { // New returns a new bus. Use [PublisherOf] to make event publishers, // and [Bus.Queue] and [Subscribe] to make event subscribers. func New() *Bus { - stopCtl, stopWorker := newGoroutineShutdown() ret := &Bus{ write: make(chan any), - stop: stopCtl, snapshot: make(chan chan []any), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, } - go ret.pump(stopWorker) + ret.router = runWorker(ret.pump) return ret } @@ -67,7 +65,7 @@ func (b *Bus) Client(name string) *Client { // Close blocks until the bus is fully shut down. The bus is // permanently unusable after closing. 
func (b *Bus) Close() { - b.stop.StopAndWait() + b.router.StopAndWait() var clients set.Set[*Client] b.topicsMu.Lock() @@ -79,8 +77,7 @@ func (b *Bus) Close() { } } -func (b *Bus) pump(stop goroutineShutdownWorker) { - defer stop.Done() +func (b *Bus) pump(ctx context.Context) { var vals queue acceptCh := func() chan any { if vals.Full() { @@ -102,13 +99,13 @@ func (b *Bus) pump(stop goroutineShutdownWorker) { select { case d.write <- val: break deliverOne - case <-d.stop.WaitChan(): + case <-d.closed(): // Queue closed, don't block but continue // delivering to others. break deliverOne case in := <-acceptCh(): vals.Add(in) - case <-stop.Stop(): + case <-ctx.Done(): return case ch := <-b.snapshot: ch <- vals.Snapshot() @@ -122,7 +119,7 @@ func (b *Bus) pump(stop goroutineShutdownWorker) { // resuming. for vals.Empty() { select { - case <-stop.Stop(): + case <-ctx.Done(): return case val := <-b.write: vals.Add(val) @@ -168,59 +165,89 @@ func (b *Bus) unsubscribe(t reflect.Type, q *subscribeState) { b.topics[t] = slices.Delete(slices.Clone(b.topics[t]), i, i+1) } -func newGoroutineShutdown() (goroutineShutdownControl, goroutineShutdownWorker) { - ctx, cancel := context.WithCancel(context.Background()) +// A worker runs a worker goroutine and helps coordinate its shutdown. +type worker struct { + ctx context.Context + stop context.CancelFunc + stopped chan struct{} +} - ctl := goroutineShutdownControl{ - startShutdown: cancel, - shutdownFinished: make(chan struct{}), +// runWorker creates a worker goroutine running fn. The context passed +// to fn is canceled by [worker.Stop]. 
+func runWorker(fn func(context.Context)) *worker { + ctx, stop := context.WithCancel(context.Background()) + ret := &worker{ + ctx: ctx, + stop: stop, + stopped: make(chan struct{}), } - work := goroutineShutdownWorker{ - startShutdown: ctx.Done(), - shutdownFinished: ctl.shutdownFinished, + go ret.run(fn) + return ret +} + +func (w *worker) run(fn func(context.Context)) { + defer close(w.stopped) + fn(w.ctx) +} + +// Stop signals the worker goroutine to shut down. +func (w *worker) Stop() { w.stop() } + +// Done returns a channel that is closed when the worker goroutine +// exits. +func (w *worker) Done() <-chan struct{} { return w.stopped } + +// Wait waits until the worker goroutine has exited. +func (w *worker) Wait() { <-w.stopped } + +// StopAndWait signals the worker goroutine to shut down, then waits +// for it to exit. +func (w *worker) StopAndWait() { + w.stop() + <-w.stopped +} + +// stopFlag is a value that can be watched for a notification. The +// zero value is ready for use. +// +// The flag is notified by running [stopFlag.Stop]. Stop can be called +// multiple times. Upon the first call to Stop, [stopFlag.Done] is +// closed, all pending [stopFlag.Wait] calls return, and future Wait +// calls return immediately. +// +// A stopFlag can only notify once, and is intended for use as a +// one-way shutdown signal that's lighter than a cancellable +// context.Context. +type stopFlag struct { + // guards the lazy construction of stopped, and the value of + // alreadyStopped. + mu sync.Mutex + stopped chan struct{} + alreadyStopped bool +} + +func (s *stopFlag) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.alreadyStopped { + return } - - return ctl, work + s.alreadyStopped = true + if s.stopped == nil { + s.stopped = make(chan struct{}) + } + close(s.stopped) } -// goroutineShutdownControl is a helper type to manage the shutdown of -// a worker goroutine. The worker goroutine should use the -// goroutineShutdownWorker related to this controller. 
-type goroutineShutdownControl struct { - startShutdown context.CancelFunc - shutdownFinished chan struct{} +func (s *stopFlag) Done() <-chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + if s.stopped == nil { + s.stopped = make(chan struct{}) + } + return s.stopped } -func (ctl *goroutineShutdownControl) Stop() { - ctl.startShutdown() -} - -func (ctl *goroutineShutdownControl) Wait() { - <-ctl.shutdownFinished -} - -func (ctl *goroutineShutdownControl) WaitChan() <-chan struct{} { - return ctl.shutdownFinished -} - -func (ctl *goroutineShutdownControl) StopAndWait() { - ctl.Stop() - ctl.Wait() -} - -// goroutineShutdownWorker is a helper type for a worker goroutine to -// be notified that it should shut down, and to report that shutdown -// has completed. The notification is triggered by the related -// goroutineShutdownControl. -type goroutineShutdownWorker struct { - startShutdown <-chan struct{} - shutdownFinished chan struct{} -} - -func (work *goroutineShutdownWorker) Stop() <-chan struct{} { - return work.startShutdown -} - -func (work *goroutineShutdownWorker) Done() { - close(work.shutdownFinished) +func (s *stopFlag) Wait() { + <-s.Done() } diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index 19ddc1256..b2d0641d9 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -4,7 +4,6 @@ package eventbus import ( - "context" "reflect" ) @@ -17,17 +16,13 @@ type publisher interface { // A Publisher publishes typed events on a bus. 
type Publisher[T any] struct { - client *Client - stopCtx context.Context - stop context.CancelFunc + client *Client + stop stopFlag } func newPublisher[T any](c *Client) *Publisher[T] { - ctx, cancel := context.WithCancel(context.Background()) ret := &Publisher[T]{ - client: c, - stopCtx: ctx, - stop: cancel, + client: c, } c.addPublisher(ret) return ret @@ -39,7 +34,7 @@ func newPublisher[T any](c *Client) *Publisher[T] { func (p *Publisher[T]) Close() { // Just unblocks any active calls to Publish, no other // synchronization needed. - p.stop() + p.stop.Stop() p.client.deletePublisher(p) } @@ -52,14 +47,14 @@ func (p *Publisher[T]) Publish(v T) { // Check for just a stopped publisher or bus before trying to // write, so that once closed Publish consistently does nothing. select { - case <-p.stopCtx.Done(): + case <-p.stop.Done(): return default: } select { case p.client.publish() <- v: - case <-p.stopCtx.Done(): + case <-p.stop.Done(): } } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 896f0ce1f..606410c8e 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -27,7 +27,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. 
- dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool + dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool Close() } @@ -35,29 +35,26 @@ type subscriber interface { type subscribeState struct { client *Client - write chan any - stop goroutineShutdownControl - snapshot chan chan []any + dispatcher *worker + write chan any + snapshot chan chan []any outputsMu sync.Mutex outputs map[reflect.Type]subscriber } func newSubscribeState(c *Client) *subscribeState { - stopCtl, stopWorker := newGoroutineShutdown() ret := &subscribeState{ client: c, write: make(chan any), - stop: stopCtl, snapshot: make(chan chan []any), outputs: map[reflect.Type]subscriber{}, } - go ret.pump(stopWorker) + ret.dispatcher = runWorker(ret.pump) return ret } -func (q *subscribeState) pump(stop goroutineShutdownWorker) { - defer stop.Done() +func (q *subscribeState) pump(ctx context.Context) { var vals queue acceptCh := func() chan any { if vals.Full() { @@ -74,7 +71,7 @@ func (q *subscribeState) pump(stop goroutineShutdownWorker) { vals.Drop() continue } - if !sub.dispatch(&vals, stop, acceptCh) { + if !sub.dispatch(ctx, &vals, acceptCh) { return } } else { @@ -85,7 +82,7 @@ func (q *subscribeState) pump(stop goroutineShutdownWorker) { select { case val := <-q.write: vals.Add(val) - case <-stop.Stop(): + case <-ctx.Done(): return case ch := <-q.snapshot: ch <- vals.Snapshot() @@ -120,7 +117,7 @@ func (q *subscribeState) subscriberFor(val any) subscriber { // Close closes the subscribeState. Implicitly closes all Subscribers // linked to this state, and any pending events are discarded. func (s *subscribeState) close() { - s.stop.StopAndWait() + s.dispatcher.StopAndWait() var subs map[reflect.Type]subscriber s.outputsMu.Lock() @@ -131,23 +128,23 @@ func (s *subscribeState) close() { } } +func (s *subscribeState) closed() <-chan struct{} { + return s.dispatcher.Done() +} + // A Subscriber delivers one type of event from a [Client]. 
type Subscriber[T any] struct { - doneCtx context.Context - done context.CancelFunc - recv *subscribeState - read chan T + stop stopFlag + recv *subscribeState + read chan T } func newSubscriber[T any](r *subscribeState) *Subscriber[T] { t := reflect.TypeFor[T]() - ctx, cancel := context.WithCancel(context.Background()) ret := &Subscriber[T]{ - doneCtx: ctx, - done: cancel, - recv: r, - read: make(chan T), + recv: r, + read: make(chan T), } r.addSubscriber(t, ret) @@ -158,7 +155,7 @@ func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acceptCh func() chan any) bool { +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool { t := vals.Peek().(T) for { // Keep the cases in this select in sync with subscribeState.pump @@ -170,7 +167,7 @@ func (s *Subscriber[T]) dispatch(vals *queue, stop goroutineShutdownWorker, acce return true case val := <-acceptCh(): vals.Add(val) - case <-stop.Stop(): + case <-ctx.Done(): return false case ch := <-s.recv.snapshot: ch <- vals.Snapshot() @@ -187,13 +184,13 @@ func (s *Subscriber[T]) Events() <-chan T { // Done returns a channel that is closed when the subscriber is // closed. func (s *Subscriber[T]) Done() <-chan struct{} { - return s.doneCtx.Done() + return s.stop.Done() } // Close closes the Subscriber, indicating the caller no longer wishes // to receive this event type. After Close, receives on // [Subscriber.Events] block for ever. 
func (s *Subscriber[T]) Close() { - s.done() // unblock receivers + s.stop.Stop() // unblock receivers s.recv.deleteSubscriber(reflect.TypeFor[T]()) } From c6b8e6f6b7a2a95edbc620bcaaa473bd21a68d5b Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 3 Mar 2025 15:02:35 +0000 Subject: [PATCH 30/87] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 57 +++++++++++++++++++++---------------------- licenses/apple.md | 23 +++++++++-------- licenses/tailscale.md | 25 +++++++++---------- licenses/windows.md | 24 +++++++++--------- 4 files changed, 63 insertions(+), 66 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index 378baa805..c3e9e989a 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -9,34 +9,33 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519) ([BSD-3-Clause](https://github.com/FiloSottile/edwards25519/blob/v1.1.0/LICENSE)) - - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.24.1/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.26.5/config/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.16.16/credentials/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.14.11/feature/ec2/imds/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) 
([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.2.10/internal/configsources/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.5.10/internal/endpoints/v2/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.7.2/internal/ini/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.24.1/internal/sync/singleflight/LICENSE)) - - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.10.4/service/internal/accept-encoding/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.10.10/service/internal/presigned-url/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.29.5/config/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/credentials/v1.17.58/credentials/LICENSE.txt)) + - 
[github.com/aws/aws-sdk-go-v2/feature/ec2/imds](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/feature/ec2/imds) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/feature/ec2/imds/v1.16.27/feature/ec2/imds/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/configsources](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/configsources) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/configsources/v1.3.31/internal/configsources/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/endpoints/v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/endpoints/v2.6.31/internal/endpoints/v2/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/ini](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/ini) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/internal/ini/v1.8.2/internal/ini/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/aws-sdk-go-v2/blob/v1.36.0/internal/sync/singleflight/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/accept-encoding/v1.12.2/service/internal/accept-encoding/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/internal/presigned-url](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/internal/presigned-url/v1.12.12/service/internal/presigned-url/LICENSE.txt)) - [github.com/aws/aws-sdk-go-v2/service/ssm](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssm) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssm/v1.44.7/service/ssm/LICENSE.txt)) - - 
[github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.18.7/service/sso/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.21.7/service/ssooidc/LICENSE.txt)) - - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.26.7/service/sts/LICENSE.txt)) - - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.19.0/LICENSE)) - - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.19.0/internal/sync/singleflight/LICENSE)) - - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) + - [github.com/aws/aws-sdk-go-v2/service/sso](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sso) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sso/v1.24.14/service/sso/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/ssooidc](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ssooidc) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/ssooidc/v1.28.13/service/ssooidc/LICENSE.txt)) + - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) + - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) + - 
[github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - 
[github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) + - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -65,17 +64,17 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.33.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) 
([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.22.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) - - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.29.0:LICENSE)) - - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/64c016c92987/LICENSE)) + - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) 
([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) + - [golang.org/x/tools](https://pkg.go.dev/golang.org/x/tools) ([BSD-3-Clause](https://cs.opensource.google/go/x/tools/+/v0.30.0:LICENSE)) + - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [inet.af/netaddr](https://pkg.go.dev/inet.af/netaddr) ([BSD-3-Clause](Unknown)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index 7741318f7..a2984ea2e 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -28,20 +28,19 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) 
([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) - [github.com/google/uuid](https://pkg.go.dev/github.com/google/uuid) ([BSD-3-Clause](https://github.com/google/uuid/blob/v1.6.0/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) + - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/15c9b8791914/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -69,14 +68,14 @@ See also the dependencies in the [Tailscale CLI][]. 
- [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) - [tailscale.com](https://pkg.go.dev/tailscale.com) ([BSD-3-Clause](https://github.com/tailscale/tailscale/blob/HEAD/LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index ab79ee391..777687be6 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -33,7 +33,6 @@ Some packages may only be included on certain architectures or operating systems - [github.com/aws/aws-sdk-go-v2/service/sts](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/sts) ([Apache-2.0](https://github.com/aws/aws-sdk-go-v2/blob/service/sts/v1.33.13/service/sts/LICENSE.txt)) - [github.com/aws/smithy-go](https://pkg.go.dev/github.com/aws/smithy-go) ([Apache-2.0](https://github.com/aws/smithy-go/blob/v1.22.2/LICENSE)) - [github.com/aws/smithy-go/internal/sync/singleflight](https://pkg.go.dev/github.com/aws/smithy-go/internal/sync/singleflight) ([BSD-3-Clause](https://github.com/aws/smithy-go/blob/v1.22.2/internal/sync/singleflight/LICENSE)) - - [github.com/bits-and-blooms/bitset](https://pkg.go.dev/github.com/bits-and-blooms/bitset) ([BSD-3-Clause](https://github.com/bits-and-blooms/bitset/blob/v1.13.0/LICENSE)) - [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) 
([ISC](https://github.com/coder/websocket/blob/v1.8.12/LICENSE.txt)) - [github.com/coreos/go-iptables/iptables](https://pkg.go.dev/github.com/coreos/go-iptables/iptables) ([Apache-2.0](https://github.com/coreos/go-iptables/blob/65c67c9f46e6/LICENSE)) - [github.com/creack/pty](https://pkg.go.dev/github.com/creack/pty) ([MIT](https://github.com/creack/pty/blob/v1.1.23/LICENSE)) @@ -41,8 +40,8 @@ Some packages may only be included on certain architectures or operating systems - [github.com/digitalocean/go-smbios/smbios](https://pkg.go.dev/github.com/digitalocean/go-smbios/smbios) ([Apache-2.0](https://github.com/digitalocean/go-smbios/blob/390a4f403a8e/LICENSE.md)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.11.1/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/go-ole/go-ole](https://pkg.go.dev/github.com/go-ole/go-ole) ([MIT](https://github.com/go-ole/go-ole/blob/v1.3.0/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) 
([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) @@ -52,7 +51,7 @@ Some packages may only be included on certain architectures or operating systems - [github.com/gorilla/csrf](https://pkg.go.dev/github.com/gorilla/csrf) ([BSD-3-Clause](https://github.com/gorilla/csrf/blob/9dd6af1f6d30/LICENSE)) - [github.com/gorilla/securecookie](https://pkg.go.dev/github.com/gorilla/securecookie) ([BSD-3-Clause](https://github.com/gorilla/securecookie/blob/v1.1.2/LICENSE)) - [github.com/hdevalence/ed25519consensus](https://pkg.go.dev/github.com/hdevalence/ed25519consensus) ([BSD-3-Clause](https://github.com/hdevalence/ed25519consensus/blob/v0.2.0/LICENSE)) - - [github.com/illarion/gonotify/v2](https://pkg.go.dev/github.com/illarion/gonotify/v2) ([MIT](https://github.com/illarion/gonotify/blob/v2.0.3/LICENSE)) + - [github.com/illarion/gonotify/v3](https://pkg.go.dev/github.com/illarion/gonotify/v3) ([MIT](https://github.com/illarion/gonotify/blob/v3.0.2/LICENSE)) - [github.com/insomniacslk/dhcp](https://pkg.go.dev/github.com/insomniacslk/dhcp) ([BSD-3-Clause](https://github.com/insomniacslk/dhcp/blob/8c70d406f6d2/LICENSE)) - [github.com/jellydator/ttlcache/v3](https://pkg.go.dev/github.com/jellydator/ttlcache/v3) ([MIT](https://github.com/jellydator/ttlcache/blob/v3.1.0/LICENSE)) - [github.com/jmespath/go-jmespath](https://pkg.go.dev/github.com/jmespath/go-jmespath) ([Apache-2.0](https://github.com/jmespath/go-jmespath/blob/v0.4.0/LICENSE)) @@ -91,15 +90,15 @@ Some packages may only be included on certain architectures or operating systems - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - 
[golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.25.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) - - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.9.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) + - [golang.org/x/time/rate](https://pkg.go.dev/golang.org/x/time/rate) ([BSD-3-Clause](https://cs.opensource.google/go/x/time/+/v0.10.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [gvisor.dev/gvisor/pkg](https://pkg.go.dev/gvisor.dev/gvisor/pkg) ([Apache-2.0](https://github.com/google/gvisor/blob/9414b50a5633/LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 8abbd52d5..78fdcf7fb 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -35,7 +35,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/dblohm7/wingoes](https://pkg.go.dev/github.com/dblohm7/wingoes) ([BSD-3-Clause](https://github.com/dblohm7/wingoes/blob/b75a8a7d7eb0/LICENSE)) - [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) - [github.com/google/nftables](https://pkg.go.dev/github.com/google/nftables) ([Apache-2.0](https://github.com/google/nftables/blob/5e242ec57806/LICENSE)) @@ -62,23 +62,23 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/72f92d5087d4/LICENSE)) - - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/cfd3289ef17f/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/04068c1cab63/LICENSE)) + - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) - [github.com/vishvananda/netns](https://pkg.go.dev/github.com/vishvananda/netns) ([Apache-2.0](https://github.com/vishvananda/netns/blob/v0.0.4/LICENSE)) - [github.com/x448/float16](https://pkg.go.dev/github.com/x448/float16) ([MIT](https://github.com/x448/float16/blob/v0.8.4/LICENSE)) - [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - - 
[golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/a8ea4be8:LICENSE)) - - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/7588d65b:LICENSE)) - - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.23.0:LICENSE)) - - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.22.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.34.0:LICENSE)) - - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.10.0:LICENSE)) - - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/1c14dcad:LICENSE)) - - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.28.0:LICENSE)) - - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.21.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) + - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) + - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) + - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) 
([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) + - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) + - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) + - [golang.org/x/text](https://pkg.go.dev/golang.org/x/text) ([BSD-3-Clause](https://cs.opensource.google/go/x/text/+/v0.22.0:LICENSE)) - [golang.zx2c4.com/wintun](https://pkg.go.dev/golang.zx2c4.com/wintun) ([MIT](https://git.zx2c4.com/wintun-go/tree/LICENSE?id=0fa3db229ce2)) - [golang.zx2c4.com/wireguard/windows/tunnel/winipcfg](https://pkg.go.dev/golang.zx2c4.com/wireguard/windows/tunnel/winipcfg) ([MIT](https://git.zx2c4.com/wireguard-windows/tree/COPYING?h=v0.5.3)) - [google.golang.org/protobuf](https://pkg.go.dev/google.golang.org/protobuf) ([BSD-3-Clause](https://github.com/protocolbuffers/protobuf-go/blob/v1.35.1/LICENSE)) From 27e0575f76f0d8c88ec04d770ed7d6ec86bcba91 Mon Sep 17 00:00:00 2001 From: Sam Linville Date: Wed, 5 Mar 2025 10:55:37 -0600 Subject: [PATCH 31/87] cmd/tsidp: add README and Dockerfile (#15205) --- cmd/tsidp/Dockerfile | 41 ++++++++++++++++++ cmd/tsidp/README.md | 100 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 cmd/tsidp/Dockerfile create mode 100644 cmd/tsidp/README.md diff --git a/cmd/tsidp/Dockerfile b/cmd/tsidp/Dockerfile new file mode 100644 index 000000000..605a7ba2e --- /dev/null +++ b/cmd/tsidp/Dockerfile @@ -0,0 +1,41 @@ +# Build stage +FROM golang:alpine AS builder + +# Install build dependencies +RUN apk add --no-cache git + +# Set working directory +WORKDIR /src + +# Copy only go.mod and go.sum first to leverage Docker caching +COPY go.mod go.sum ./ +RUN go mod download + +# Copy the entire repository +COPY . . 
+ +# Build the tsidp binary +RUN go build -o /bin/tsidp ./cmd/tsidp + +# Final stage +FROM alpine:latest + +# Create necessary directories +RUN mkdir -p /var/lib/tsidp + +# Copy binary from builder stage +COPY --from=builder /bin/tsidp /app/tsidp + +# Set working directory +WORKDIR /app + +# Environment variables +ENV TAILSCALE_USE_WIP_CODE=1 \ + TS_HOSTNAME=tsidp \ + TS_STATE_DIR=/var/lib/tsidp + +# Expose the default port +EXPOSE 443 + +# Run the application +ENTRYPOINT ["/app/tsidp"] \ No newline at end of file diff --git a/cmd/tsidp/README.md b/cmd/tsidp/README.md new file mode 100644 index 000000000..d51138b6d --- /dev/null +++ b/cmd/tsidp/README.md @@ -0,0 +1,100 @@ +# `tsidp` - Tailscale OpenID Connect (OIDC) Identity Provider + +[![status: experimental](https://img.shields.io/badge/status-experimental-blue)](https://tailscale.com/kb/1167/release-stages/#experimental) + +`tsidp` is an OIDC Identity Provider (IdP) server that integrates with your Tailscale network. It allows you to use Tailscale identities for authentication in applications that support OpenID Connect, enabling single sign-on (SSO) capabilities within your tailnet. + +## Prerequisites + +- A Tailscale network (tailnet) with magicDNS and HTTPS enabled +- A Tailscale authentication key from your tailnet +- Docker installed on your system + +## Installation using Docker + +1. **Build the Docker Image** + + The Dockerfile uses a multi-stage build process to: + - Build the `tsidp` binary from source + - Create a minimal Alpine-based image with just the necessary components + + ```bash + # Clone the Tailscale repository + git clone https://github.com/tailscale/tailscale.git + cd tailscale + ``` + + ```bash + # Build the Docker image + docker build -t tsidp:latest -f cmd/tsidp/Dockerfile . + ``` + +2. **Run the Container** + + Replace `YOUR_TAILSCALE_AUTHKEY` with your Tailscale authentication key. 
+ + ```bash + docker run -d \ + --name tsidp \ + -p 443:443 \ + -e TS_AUTHKEY=YOUR_TAILSCALE_AUTHKEY \ + -e TS_HOSTNAME=tsidp \ + -v tsidp-data:/var/lib/tsidp \ + tsidp:latest + ``` + +3. **Verify Installation** + ```bash + docker logs tsidp + ``` + + Visit `https://tsidp.<your-tailnet>.ts.net` to confirm the service is running. + +## Usage Example: Proxmox Integration + +Here's how to configure Proxmox to use `tsidp` for authentication: + +1. In Proxmox, navigate to Datacenter > Realms > Add OpenID Connect Server + +2. Configure the following settings: + - Issuer URL: `https://tsidp.<your-tailnet>.ts.net` + - Realm: `tailscale` (or your preferred name) + - Client ID: `unused` + - Client Key: `unused` + - Default: `true` + - Autocreate users: `true` + - Username claim: `email` + +3. Set up user permissions: + - Go to Datacenter > Permissions > Groups + - Create a new group (e.g., "tsadmins") + - Click Permissions in the sidebar + - Add Group Permission + - Set Path to `/` for full admin access or scope as needed + - Set the group and role + - Add Tailscale-authenticated users to the group + +## Configuration Options + +The `tsidp` server supports several command-line flags: + +- `--verbose`: Enable verbose logging +- `--port`: Port to listen on (default: 443) +- `--local-port`: Allow requests from localhost +- `--use-local-tailscaled`: Use local tailscaled instead of tsnet +- `--dir`: tsnet state directory + +## Environment Variables + +- `TS_AUTHKEY`: Your Tailscale authentication key (required) +- `TS_HOSTNAME`: Hostname for the `tsidp` server (default: "idp") +- `TS_STATE_DIR`: State directory (default: "/var/lib/tsidp") +- `TAILSCALE_USE_WIP_CODE`: Enable work-in-progress code (default: "1") + +## Support + +This is an [experimental](https://tailscale.com/kb/1167/release-stages#experimental), work-in-progress feature. For issues or questions, file issues on the [GitHub repository](https://github.com/tailscale/tailscale). + +## License + +BSD-3-Clause License.
See [LICENSE](../../LICENSE) for details. \ No newline at end of file From 96202a7c0cad1e2d63479339d8a99880d4c897e7 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Wed, 5 Mar 2025 14:14:19 -0800 Subject: [PATCH 32/87] .github/workflows: descope natlab CI for now until GitHub flakes are fixed The natlab VM tests are flaking on GitHub Actions. To not distract people, disable them for now (unless they're touched directly) until they're made more reliable, which will be some painful debugging probably. Updates #13038 Change-Id: I6570f1cd43f8f4d628a54af8481b67455ebe83dc Signed-off-by: Brad Fitzpatrick --- .github/workflows/natlab-integrationtest.yml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/natlab-integrationtest.yml b/.github/workflows/natlab-integrationtest.yml index b8d99e668..1de74cdaa 100644 --- a/.github/workflows/natlab-integrationtest.yml +++ b/.github/workflows/natlab-integrationtest.yml @@ -9,10 +9,7 @@ concurrency: on: pull_request: paths: - - "tailcfg/**" - - "wgengine/**" - - "ipn/ipnlocal/**" - - ".github/workflows/natlab-integrationtest.yml" + - "tstest/integration/nat/nat_test.go" jobs: natlab-integrationtest: runs-on: ubuntu-latest From bf40bc4fa0dc952a6c4f78997b14367b2eb96d4a Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 10:33:35 -0800 Subject: [PATCH 33/87] util/eventbus: make internal queue a generic type In preparation for making the queues carry additional event metadata. 
Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 2 +- util/eventbus/queue.go | 26 ++++++++++++++------------ util/eventbus/subscribe.go | 6 +++--- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 3520be828..9f6adbfb7 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -78,7 +78,7 @@ func (b *Bus) Close() { } func (b *Bus) pump(ctx context.Context) { - var vals queue + var vals queue[any] acceptCh := func() chan any { if vals.Full() { return nil diff --git a/util/eventbus/queue.go b/util/eventbus/queue.go index 8f6bda748..a62bf3c62 100644 --- a/util/eventbus/queue.go +++ b/util/eventbus/queue.go @@ -10,32 +10,32 @@ import ( const maxQueuedItems = 16 // queue is an ordered queue of length up to maxQueuedItems. -type queue struct { - vals []any +type queue[T any] struct { + vals []T start int } // canAppend reports whether a value can be appended to q.vals without // shifting values around. -func (q *queue) canAppend() bool { +func (q *queue[T]) canAppend() bool { return cap(q.vals) < maxQueuedItems || len(q.vals) < cap(q.vals) } -func (q *queue) Full() bool { +func (q *queue[T]) Full() bool { return q.start == 0 && !q.canAppend() } -func (q *queue) Empty() bool { +func (q *queue[T]) Empty() bool { return q.start == len(q.vals) } -func (q *queue) Len() int { +func (q *queue[T]) Len() int { return len(q.vals) - q.start } // Add adds v to the end of the queue. Blocks until append can be // done. -func (q *queue) Add(v any) { +func (q *queue[T]) Add(v T) { if !q.canAppend() { if q.start == 0 { panic("Add on a full queue") @@ -54,21 +54,23 @@ func (q *queue) Add(v any) { // Peek returns the first value in the queue, without removing it from // the queue, or nil if the queue is empty. 
-func (q *queue) Peek() any { +func (q *queue[T]) Peek() T { if q.Empty() { - return nil + var zero T + return zero } return q.vals[q.start] } // Drop discards the first value in the queue, if any. -func (q *queue) Drop() { +func (q *queue[T]) Drop() { if q.Empty() { return } - q.vals[q.start] = nil + var zero T + q.vals[q.start] = zero q.start++ if q.Empty() { // Reset cursor to start of array, it's free to do. @@ -78,6 +80,6 @@ func (q *queue) Drop() { } // Snapshot returns a copy of the queue's contents. -func (q *queue) Snapshot() []any { +func (q *queue[T]) Snapshot() []T { return slices.Clone(q.vals[q.start:]) } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 606410c8e..85aa1ff6a 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -27,7 +27,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. - dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool + dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool Close() } @@ -55,7 +55,7 @@ func newSubscribeState(c *Client) *subscribeState { } func (q *subscribeState) pump(ctx context.Context) { - var vals queue + var vals queue[any] acceptCh := func() chan any { if vals.Full() { return nil @@ -155,7 +155,7 @@ func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue, acceptCh func() chan any) bool { +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool { t := vals.Peek().(T) for { // Keep the cases in this select in sync with subscribeState.pump From a1192dd686fa4f2b53db4b1cba9030b01f80b891 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 10:39:06 -0800 Subject: [PATCH 34/87] util/eventbus: track additional event context in 
publish queue Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 16 ++++++++-------- util/eventbus/client.go | 2 +- util/eventbus/publish.go | 15 ++++++++++++++- 3 files changed, 23 insertions(+), 10 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 9f6adbfb7..33c0ae84d 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -16,8 +16,8 @@ import ( // subscribers. type Bus struct { router *worker - write chan any - snapshot chan chan []any + write chan publishedEvent + snapshot chan chan []publishedEvent topicsMu sync.Mutex // guards everything below. topics map[reflect.Type][]*subscribeState @@ -31,8 +31,8 @@ type Bus struct { // and [Bus.Queue] and [Subscribe] to make event subscribers. func New() *Bus { ret := &Bus{ - write: make(chan any), - snapshot: make(chan chan []any), + write: make(chan publishedEvent), + snapshot: make(chan chan []publishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, } @@ -78,8 +78,8 @@ func (b *Bus) Close() { } func (b *Bus) pump(ctx context.Context) { - var vals queue[any] - acceptCh := func() chan any { + var vals queue[publishedEvent] + acceptCh := func() chan publishedEvent { if vals.Full() { return nil } @@ -92,12 +92,12 @@ func (b *Bus) pump(ctx context.Context) { // queue space for it. 
for !vals.Empty() { val := vals.Peek() - dests := b.dest(reflect.ValueOf(val).Type()) + dests := b.dest(reflect.ValueOf(val.Event).Type()) for _, d := range dests { deliverOne: for { select { - case d.write <- val: + case d.write <- val.Event: break deliverOne case <-d.closed(): // Queue closed, don't block but continue diff --git a/util/eventbus/client.go b/util/eventbus/client.go index ff8eea6ee..174cc5ea5 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -75,7 +75,7 @@ func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) { c.bus.unsubscribe(t, s) } -func (c *Client) publish() chan<- any { +func (c *Client) publish() chan<- publishedEvent { return c.bus.write } diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index b2d0641d9..fdabdcb23 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -5,8 +5,15 @@ package eventbus import ( "reflect" + "time" ) +type publishedEvent struct { + Event any + From *Client + Published time.Time +} + // publisher is a uniformly typed wrapper around Publisher[T], so that // debugging facilities can look at active publishers. 
type publisher interface { @@ -52,8 +59,14 @@ func (p *Publisher[T]) Publish(v T) { default: } + evt := publishedEvent{ + Event: v, + From: p.client, + Published: time.Now(), + } + select { - case p.client.publish() <- v: + case p.client.publish() <- evt: case <-p.stop.Done(): } } From cf5c788cf19001b09e71514c8a66593385e43ea9 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 10:42:08 -0800 Subject: [PATCH 35/87] util/eventbus: track additional event context in subscribe queue Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 10 +++++++++- util/eventbus/subscribe.go | 28 ++++++++++++++++++---------- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 33c0ae84d..b479f3940 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -8,6 +8,7 @@ import ( "reflect" "slices" "sync" + "time" "tailscale.com/util/set" ) @@ -93,11 +94,18 @@ func (b *Bus) pump(ctx context.Context) { for !vals.Empty() { val := vals.Peek() dests := b.dest(reflect.ValueOf(val.Event).Type()) + routed := time.Now() for _, d := range dests { + evt := queuedEvent{ + Event: val.Event, + From: val.From, + Published: val.Published, + Routed: routed, + } deliverOne: for { select { - case d.write <- val.Event: + case d.write <- evt: break deliverOne case <-d.closed(): // Queue closed, don't block but continue diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 85aa1ff6a..71201aa40 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,8 +8,16 @@ import ( "fmt" "reflect" "sync" + "time" ) +type queuedEvent struct { + Event any + From *Client + Published time.Time + Routed time.Time +} + // subscriber is a uniformly typed wrapper around Subscriber[T], so // that debugging facilities can look at active subscribers. 
type subscriber interface { @@ -27,7 +35,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. - dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool + dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool Close() } @@ -36,8 +44,8 @@ type subscribeState struct { client *Client dispatcher *worker - write chan any - snapshot chan chan []any + write chan queuedEvent + snapshot chan chan []queuedEvent outputsMu sync.Mutex outputs map[reflect.Type]subscriber @@ -46,8 +54,8 @@ type subscribeState struct { func newSubscribeState(c *Client) *subscribeState { ret := &subscribeState{ client: c, - write: make(chan any), - snapshot: make(chan chan []any), + write: make(chan queuedEvent), + snapshot: make(chan chan []queuedEvent), outputs: map[reflect.Type]subscriber{}, } ret.dispatcher = runWorker(ret.pump) @@ -55,8 +63,8 @@ func newSubscribeState(c *Client) *subscribeState { } func (q *subscribeState) pump(ctx context.Context) { - var vals queue[any] - acceptCh := func() chan any { + var vals queue[queuedEvent] + acceptCh := func() chan queuedEvent { if vals.Full() { return nil } @@ -65,7 +73,7 @@ func (q *subscribeState) pump(ctx context.Context) { for { if !vals.Empty() { val := vals.Peek() - sub := q.subscriberFor(val) + sub := q.subscriberFor(val.Event) if sub == nil { // Raced with unsubscribe. vals.Drop() @@ -155,8 +163,8 @@ func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[any], acceptCh func() chan any) bool { - t := vals.Peek().(T) +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool { + t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump // above. 
The only different should be that this select From ffb0b66d5b99e018cdfc1b9fa9c79f6b3dd5542e Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Thu, 6 Mar 2025 06:05:41 -0800 Subject: [PATCH 36/87] cmd/k8s-operator: advertise VIPServices in ProxyGroup config (#14946) Now that packets flow for VIPServices, the last piece needed to start serving them from a ProxyGroup is config to tell the proxy Pods which services they should advertise. Updates tailscale/corp#24795 Change-Id: Ic7bbeac8e93c9503558107bc5f6123be02a84c77 Signed-off-by: Tom Proctor --- cmd/k8s-operator/egress-services.go | 6 +- cmd/k8s-operator/ingress-for-pg.go | 117 ++++++++++++++++++------ cmd/k8s-operator/ingress-for-pg_test.go | 43 ++++++++- cmd/k8s-operator/proxygroup.go | 27 +++++- cmd/k8s-operator/proxygroup_specs.go | 10 +- cmd/k8s-operator/proxygroup_test.go | 84 ++++++++++++++++- 6 files changed, 251 insertions(+), 36 deletions(-) diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index cf218ba4f..e997e5884 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -630,7 +630,11 @@ func tailnetTargetFromSvc(svc *corev1.Service) egressservices.TailnetTarget { func portMap(p corev1.ServicePort) egressservices.PortMap { // TODO (irbekrm): out of bounds check? 
- return egressservices.PortMap{Protocol: string(p.Protocol), MatchPort: uint16(p.TargetPort.IntVal), TargetPort: uint16(p.Port)} + return egressservices.PortMap{ + Protocol: string(p.Protocol), + MatchPort: uint16(p.TargetPort.IntVal), + TargetPort: uint16(p.Port), + } } func isEgressSvcForProxyGroup(obj client.Object) bool { diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 4fa0af2a2..1fa12eb59 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -99,7 +99,7 @@ func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Reque hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) - if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) { + if !ing.DeletionTimestamp.IsZero() || !shouldExpose(ing) { return res, a.maybeCleanup(ctx, hostname, ing, logger) } @@ -122,6 +122,8 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") return nil } + logger = logger.With("ProxyGroup", pgName) + pg := &tsapi.ProxyGroup{} if err := a.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { if apierrors.IsNotFound(err) { @@ -148,8 +150,6 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } - logger = logger.With("proxy-group", pg) - if !slices.Contains(ing.Finalizers, FinalizerNamePG) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -288,7 +288,13 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin } } - // 5. Update Ingress status + // 5. 
Update tailscaled's AdvertiseServices config, which should add the VIPService + // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. + if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { + return fmt.Errorf("failed to update tailscaled config: %w", err) + } + + // 6. Update Ingress status oldStatus := ing.Status.DeepCopy() ports := []networkingv1.IngressPortStatus{ { @@ -320,9 +326,9 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin // maybeCleanupProxyGroup ensures that if an Ingress hostname has changed, any VIPService resources created for the // Ingress' ProxyGroup corresponding to the old hostname are cleaned up. A run of this function will ensure that any // VIPServices that are associated with the provided ProxyGroup and no longer owned by an Ingress are cleaned up. -func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) error { +func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName string, logger *zap.SugaredLogger) error { // Get serve config for the ProxyGroup - cm, cfg, err := a.proxyGroupServeConfig(ctx, proxyGroupName) + cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) if err != nil { return fmt.Errorf("getting serve config: %w", err) } @@ -349,17 +355,16 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if !found { logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) - svc, err := a.getVIPService(ctx, vipServiceName, logger) + + // Delete the VIPService from control if necessary. 
+ svc, err := a.tsClient.GetVIPService(ctx, vipServiceName) if err != nil { errResp := &tailscale.ErrResponse{} - if errors.As(err, &errResp) && errResp.Status == http.StatusNotFound { - delete(cfg.Services, vipServiceName) - serveConfigChanged = true - continue + if ok := errors.As(err, errResp); !ok || errResp.Status != http.StatusNotFound { + return err } - return err } - if isVIPServiceForAnyIngress(svc) { + if svc != nil && isVIPServiceForAnyIngress(svc) { logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) if err := a.tsClient.DeleteVIPService(ctx, vipServiceName); err != nil { errResp := &tailscale.ErrResponse{} @@ -368,6 +373,11 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG } } } + + // Make sure the VIPService is not advertised in tailscaled or serve config. + if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pgName, vipServiceName, false, logger); err != nil { + return fmt.Errorf("failed to update tailscaled config services: %w", err) + } delete(cfg.Services, vipServiceName) serveConfigChanged = true } @@ -383,6 +393,7 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG return fmt.Errorf("updating serve config: %w", err) } } + return nil } @@ -421,7 +432,12 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, return fmt.Errorf("error deleting VIPService: %w", err) } - // 3. Remove the VIPService from the serve config for the ProxyGroup. + // 3. Unadvertise the VIPService in tailscaled config. + if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { + return fmt.Errorf("failed to update tailscaled config services: %w", err) + } + + // 4. Remove the VIPService from the serve config for the ProxyGroup. 
logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) @@ -501,7 +517,7 @@ func (a *IngressPGReconciler) tailnetCertDomain(ctx context.Context) (string, er } // shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup) -func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { +func shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && *ing.Spec.IngressClassName == tailscaleIngressClassName @@ -509,18 +525,6 @@ func (a *IngressPGReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func (a *IngressPGReconciler) getVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (*tailscale.VIPService, error) { - svc, err := a.tsClient.GetVIPService(ctx, name) - if err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { - logger.Infof("error getting VIPService %q: %v", name, err) - return nil, fmt.Errorf("error getting VIPService %q: %w", name, err) - } - } - return svc, nil -} - func isVIPServiceForIngress(svc *tailscale.VIPService, ing *networkingv1.Ingress) bool { if svc == nil || ing == nil { return false @@ -582,12 +586,16 @@ func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsa // deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress. 
func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name tailcfg.ServiceName, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { - svc, err := a.getVIPService(ctx, name, logger) + svc, err := a.tsClient.GetVIPService(ctx, name) if err != nil { + errResp := &tailscale.ErrResponse{} + if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { + return nil + } + return fmt.Errorf("error getting VIPService: %w", err) } - // isVIPServiceForIngress handles nil svc, so we don't need to check it here if !isVIPServiceForIngress(svc, ing) { return nil } @@ -606,3 +614,54 @@ func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { } return ing.Annotations[annotationHTTPEndpoint] == "enabled" } + +func (a *IngressPGReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { + logger.Debugf("Updating ProxyGroup tailscaled configs to advertise service %q: %v", serviceName, shouldBeAdvertised) + + // Get all config Secrets for this ProxyGroup. + secrets := &corev1.SecretList{} + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "config"))); err != nil { + return fmt.Errorf("failed to list config Secrets: %w", err) + } + + for _, secret := range secrets.Items { + var updated bool + for fileName, confB := range secret.Data { + var conf ipn.ConfigVAlpha + if err := json.Unmarshal(confB, &conf); err != nil { + return fmt.Errorf("error unmarshalling ProxyGroup config: %w", err) + } + + // Update the services to advertise if required. + idx := slices.Index(conf.AdvertiseServices, serviceName.String()) + isAdvertised := idx >= 0 + switch { + case isAdvertised == shouldBeAdvertised: + // Already up to date. + continue + case isAdvertised: + // Needs to be removed. 
+ conf.AdvertiseServices = slices.Delete(conf.AdvertiseServices, idx, idx+1) + case shouldBeAdvertised: + // Needs to be added. + conf.AdvertiseServices = append(conf.AdvertiseServices, serviceName.String()) + } + + // Update the Secret. + confB, err := json.Marshal(conf) + if err != nil { + return fmt.Errorf("error marshalling ProxyGroup config: %w", err) + } + mak.Set(&secret.Data, fileName, confB) + updated = true + } + + if updated { + if err := a.Update(ctx, &secret); err != nil { + return fmt.Errorf("error updating ProxyGroup config Secret: %w", err) + } + } + } + + return nil +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index c432eb7e1..8c4ffb691 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -8,6 +8,7 @@ package main import ( "context" "encoding/json" + "fmt" "maps" "reflect" "testing" @@ -24,6 +25,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" + tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/tailcfg" "tailscale.com/types/ptr" @@ -63,6 +65,7 @@ func TestIngressPGReconciler(t *testing.T) { expectReconciled(t, ingPGR, "default", "test-ingress") verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" @@ -122,6 +125,8 @@ func TestIngressPGReconciler(t *testing.T) { verifyServeConfig(t, fc, "svc:my-svc", false) verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:my-svc", "svc:my-other-svc"}) + // Delete second Ingress if err := fc.Delete(context.Background(), ing2); err != nil { t.Fatalf("deleting second Ingress: %v", err) @@ -151,6 +156,8 @@ func 
TestIngressPGReconciler(t *testing.T) { t.Error("second Ingress service config was not cleaned up") } + verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { t.Fatalf("deleting Ingress: %v", err) @@ -175,6 +182,7 @@ func TestIngressPGReconciler(t *testing.T) { if len(cfg.Services) > 0 { t.Error("serve config not cleaned up") } + verifyTailscaledConfig(t, fc, nil) } func TestValidateIngress(t *testing.T) { @@ -464,6 +472,27 @@ func verifyServeConfig(t *testing.T, fc client.Client, serviceName string, wantH } } +func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []string) { + var expected string + if expectedServices != nil { + expectedServicesJSON, err := json.Marshal(expectedServices) + if err != nil { + t.Fatalf("marshaling expected services: %v", err) + } + expected = fmt.Sprintf(`,"AdvertiseServices":%s`, expectedServicesJSON) + } + expectEqual(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName("test-pg", 0), + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "config"), + }, + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): []byte(fmt.Sprintf(`{"Version":""%s}`, expected)), + }, + }) +} + func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeTSClient) { t.Helper() @@ -494,9 +523,21 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT }, } + // Pre-create a config Secret for the ProxyGroup + pgCfgSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName("test-pg", 0), + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "config"), + }, + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): []byte("{}"), + }, + } + fc := fake.NewClientBuilder(). WithScheme(tsapi.GlobalScheme). - WithObjects(pg, pgConfigMap, tsIngressClass). 
+ WithObjects(pg, pgCfgSecret, pgConfigMap, tsIngressClass). WithStatusSubresource(pg). Build() diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 4b17d3470..463d29249 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -452,7 +452,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p for i := range pgReplicas(pg) { cfgSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-%d-config", pg.Name, i), + Name: pgConfigSecretName(pg.Name, i), Namespace: r.tsNamespace, Labels: pgSecretLabels(pg.Name, "config"), OwnerReferences: pgOwnerReference(pg), @@ -596,10 +596,35 @@ func pgTailscaledConfig(pg *tsapi.ProxyGroup, class *tsapi.ProxyClass, idx int32 conf.AuthKey = key } capVerConfigs := make(map[tailcfg.CapabilityVersion]ipn.ConfigVAlpha) + + // AdvertiseServices config is set by ingress-pg-reconciler, so make sure we + // don't overwrite it here. + if err := copyAdvertiseServicesConfig(conf, oldSecret, 106); err != nil { + return nil, err + } capVerConfigs[106] = *conf return capVerConfigs, nil } +func copyAdvertiseServicesConfig(conf *ipn.ConfigVAlpha, oldSecret *corev1.Secret, capVer tailcfg.CapabilityVersion) error { + if oldSecret == nil { + return nil + } + + oldConfB := oldSecret.Data[tsoperator.TailscaledConfigFileName(capVer)] + if len(oldConfB) == 0 { + return nil + } + + var oldConf ipn.ConfigVAlpha + if err := json.Unmarshal(oldConfB, &oldConf); err != nil { + return fmt.Errorf("error unmarshalling existing config: %w", err) + } + conf.AdvertiseServices = oldConf.AdvertiseServices + + return nil +} + func (r *ProxyGroupReconciler) validate(_ *tsapi.ProxyGroup) error { return nil } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 1ea91004b..40bbaec17 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -73,7 +73,7 @@ func pgStatefulSet(pg 
*tsapi.ProxyGroup, namespace, image, tsFirewallMode string Name: fmt.Sprintf("tailscaledconfig-%d", i), VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-%d-config", pg.Name, i), + SecretName: pgConfigSecretName(pg.Name, i), }, }, }) @@ -236,8 +236,8 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { ResourceNames: func() (secrets []string) { for i := range pgReplicas(pg) { secrets = append(secrets, - fmt.Sprintf("%s-%d-config", pg.Name, i), // Config with auth key. - fmt.Sprintf("%s-%d", pg.Name, i), // State. + pgConfigSecretName(pg.Name, i), // Config with auth key. + fmt.Sprintf("%s-%d", pg.Name, i), // State. ) } return secrets @@ -349,6 +349,10 @@ func pgReplicas(pg *tsapi.ProxyGroup) int32 { return 2 } +func pgConfigSecretName(pgName string, i int32) string { + return fmt.Sprintf("%s-%d-config", pgName, i) +} + func pgEgressCMName(pg string) string { return fmt.Sprintf("%s-egress-config", pg) } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 29100de1d..6829b3929 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -24,6 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "tailscale.com/client/tailscale" + "tailscale.com/ipn" tsoperator "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" "tailscale.com/kube/kubetypes" @@ -446,6 +447,79 @@ func TestProxyGroupTypes(t *testing.T) { }) } +func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). 
+ Build() + reconciler := &ProxyGroupReconciler{ + tsNamespace: tsNamespace, + proxyImage: testProxyImage, + Client: fc, + l: zap.Must(zap.NewDevelopment()).Sugar(), + tsClient: &fakeTSClient{}, + clock: tstest.NewClock(tstest.ClockOpts{}), + } + + existingServices := []string{"svc1", "svc2"} + existingConfigBytes, err := json.Marshal(ipn.ConfigVAlpha{ + AdvertiseServices: existingServices, + Version: "should-get-overwritten", + }) + if err != nil { + t.Fatal(err) + } + + const pgName = "test-ingress" + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: tsNamespace, + }, + // Write directly to Data because the fake client doesn't copy the write-only + // StringData field across to Data for us. + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): existingConfigBytes, + }, + }) + + mustCreate(t, fc, &tsapi.ProxyGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgName, + UID: "test-ingress-uid", + }, + Spec: tsapi.ProxyGroupSpec{ + Type: tsapi.ProxyGroupTypeIngress, + Replicas: ptr.To[int32](1), + }, + }) + expectReconciled(t, reconciler, "", pgName) + + expectedConfigBytes, err := json.Marshal(ipn.ConfigVAlpha{ + // Preserved. 
+ AdvertiseServices: existingServices, + + // Everything else got updated in the reconcile: + Version: "alpha0", + AcceptDNS: "false", + AcceptRoutes: "false", + Locked: "false", + Hostname: ptr.To(fmt.Sprintf("%s-%d", pgName, 0)), + }) + if err != nil { + t.Fatal(err) + } + expectEqual(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: pgConfigSecretName(pgName, 0), + Namespace: tsNamespace, + ResourceVersion: "2", + }, + StringData: map[string]string{ + tsoperator.TailscaledConfigFileName(106): string(expectedConfigBytes), + }, + }, omitSecretData) +} + func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { t.Helper() if r.ingressProxyGroups.Len() != wantIngress { @@ -501,7 +575,7 @@ func expectProxyGroupResources(t *testing.T, fc client.WithWatch, pg *tsapi.Prox for i := range pgReplicas(pg) { expectedSecrets = append(expectedSecrets, fmt.Sprintf("%s-%d", pg.Name, i), - fmt.Sprintf("%s-%d-config", pg.Name, i), + pgConfigSecretName(pg.Name, i), ) } } @@ -546,3 +620,11 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG }) } } + +// The operator mostly writes to StringData and reads from Data, but the fake +// client doesn't copy StringData across to Data on write. When comparing actual +// vs expected Secrets, use this function to only check what the operator writes +// to StringData. +func omitSecretData(secret *corev1.Secret) { + secret.Data = nil +} From 9d7f2719bb5e120d87fb51ac093534474d279cc4 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 6 Mar 2025 08:52:35 -0800 Subject: [PATCH 37/87] cmd/tsidp: use constant time comparison for client_id/secret (#15222) Use secure constant time comparisons for the client ID and secret values during the allowRelyingParty authorization check. 
Updates #cleanup Signed-off-by: Patrick O'Doherty --- cmd/tsidp/tsidp.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 3eabef245..96fac58fd 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -11,6 +11,7 @@ import ( "context" crand "crypto/rand" "crypto/rsa" + "crypto/subtle" "crypto/tls" "crypto/x509" "encoding/base64" @@ -345,7 +346,9 @@ func (ar *authRequest) allowRelyingParty(r *http.Request, lc *local.Client) erro clientID = r.FormValue("client_id") clientSecret = r.FormValue("client_secret") } - if ar.funnelRP.ID != clientID || ar.funnelRP.Secret != clientSecret { + clientIDcmp := subtle.ConstantTimeCompare([]byte(clientID), []byte(ar.funnelRP.ID)) + clientSecretcmp := subtle.ConstantTimeCompare([]byte(clientSecret), []byte(ar.funnelRP.Secret)) + if clientIDcmp != 1 || clientSecretcmp != 1 { return fmt.Errorf("tsidp: invalid client credentials") } return nil From 74a2373e1d0e7213d0a89b9b1b4d17f159bb2ba4 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 6 Mar 2025 15:13:10 -0800 Subject: [PATCH 38/87] cmd/k8s-operator: ensure HA Ingress can operate in multicluster mode. (#15157) cmd/k8s-operator: ensure HA Ingress can operate in multicluster mode. Update the owner reference mechanism so that: - if during HA Ingress resource creation, a VIPService with some other operator's owner reference is already found, just update the owner references to add one for this operator - if during HA Ingress deletion, the VIPService is found to have owner reference(s) from another operator, don't delete the VIPService, just remove this operator's owner reference - requeue after HA Ingress reconciles that resulted in VIPService updates, to guard against overwrites due to concurrent operations from different clusters. 
Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/k8s-operator/ingress-for-pg.go | 512 +++++++++++++++--------- cmd/k8s-operator/ingress-for-pg_test.go | 142 ++++++- cmd/k8s-operator/ingress.go | 1 + cmd/k8s-operator/operator.go | 20 +- 4 files changed, 480 insertions(+), 195 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 1fa12eb59..85a64a336 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -15,6 +15,9 @@ import ( "slices" "strings" "sync" + "time" + + "math/rand/v2" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -53,9 +56,9 @@ const ( var gaugePGIngressResources = clientmetric.NewGauge(kubetypes.MetricIngressPGResourceCount) -// IngressPGReconciler is a controller that reconciles Tailscale Ingresses should be exposed on an ingress ProxyGroup -// (in HA mode). -type IngressPGReconciler struct { +// HAIngressReconciler is a controller that reconciles Tailscale Ingresses +// should be exposed on an ingress ProxyGroup (in HA mode). +type HAIngressReconciler struct { client.Client recorder record.EventRecorder @@ -65,6 +68,7 @@ type IngressPGReconciler struct { tsNamespace string lc localClient defaultTags []string + operatorID string // stableID of the operator's Tailscale device mu sync.Mutex // protects following // managedIngresses is a set of all ingress resources that we're currently @@ -72,20 +76,29 @@ type IngressPGReconciler struct { managedIngresses set.Slice[types.UID] } -// Reconcile reconciles Ingresses that should be exposed over Tailscale in HA mode (on a ProxyGroup). It looks at all -// Ingresses with tailscale.com/proxy-group annotation. For each such Ingress, it ensures that a VIPService named after -// the hostname of the Ingress exists and is up to date. It also ensures that the serve config for the ingress -// ProxyGroup is updated to route traffic for the VIPService to the Ingress's backend Services. 
-// When an Ingress is deleted or unexposed, the VIPService and the associated serve config are cleaned up. -// Ingress hostname change also results in the VIPService for the previous hostname being cleaned up and a new VIPService -// being created for the new hostname. -func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { - logger := a.logger.With("Ingress", req.NamespacedName) +// Reconcile reconciles Ingresses that should be exposed over Tailscale in HA +// mode (on a ProxyGroup). It looks at all Ingresses with +// tailscale.com/proxy-group annotation. For each such Ingress, it ensures that +// a VIPService named after the hostname of the Ingress exists and is up to +// date. It also ensures that the serve config for the ingress ProxyGroup is +// updated to route traffic for the VIPService to the Ingress's backend +// Services. Ingress hostname change also results in the VIPService for the +// previous hostname being cleaned up and a new VIPService being created for the +// new hostname. +// HA Ingresses support multi-cluster Ingress setup. +// Each VIPService contains a list of owner references that uniquely identify +// the Ingress resource and the operator. When an Ingress that acts as a +// backend is being deleted, the corresponding VIPService is only deleted if the +// only owner reference that it contains is for this Ingress. If other owner +// references are found, then cleanup operation only removes this Ingress' owner +// reference. 
+func (r *HAIngressReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) { + logger := r.logger.With("Ingress", req.NamespacedName) logger.Debugf("starting reconcile") defer logger.Debugf("reconcile finished") ing := new(networkingv1.Ingress) - err = a.Get(ctx, req.NamespacedName, ing) + err = r.Get(ctx, req.NamespacedName, ing) if apierrors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. logger.Debugf("Ingress not found, assuming it was deleted") @@ -99,57 +112,71 @@ func (a *IngressPGReconciler) Reconcile(ctx context.Context, req reconcile.Reque hostname := hostnameForIngress(ing) logger = logger.With("hostname", hostname) - if !ing.DeletionTimestamp.IsZero() || !shouldExpose(ing) { - return res, a.maybeCleanup(ctx, hostname, ing, logger) + // needsRequeue is set to true if the underlying VIPService has changed as a result of this reconcile. If that + // is the case, we reconcile the Ingress one more time to ensure that concurrent updates to the VIPService in a + // multi-cluster Ingress setup have not resulted in another actor overwriting our VIPService update. + needsRequeue := false + if !ing.DeletionTimestamp.IsZero() || !r.shouldExpose(ing) { + needsRequeue, err = r.maybeCleanup(ctx, hostname, ing, logger) + } else { + needsRequeue, err = r.maybeProvision(ctx, hostname, ing, logger) } - - if err := a.maybeProvision(ctx, hostname, ing, logger); err != nil { - return res, fmt.Errorf("failed to provision: %w", err) + if err != nil { + return res, err + } + if needsRequeue { + res = reconcile.Result{RequeueAfter: requeueInterval()} } return res, nil } -// maybeProvision ensures that the VIPService and serve config for the Ingress are created or updated. 
-func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { - if err := validateIngressClass(ctx, a.Client); err != nil { +// maybeProvision ensures that a VIPService for this Ingress exists and is up to date and that the serve config for the +// corresponding ProxyGroup contains the Ingress backend's definition. +// If a VIPService does not exist, it will be created. +// If a VIPService exists, but only with owner references from other operator instances, an owner reference for this +// operator instance is added. +// If a VIPService exists, but does not have an owner reference from any operator, we error +// out assuming that this is an owner reference created by an unknown actor. +// Returns true if the operation resulted in a VIPService update. +func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcsChanged bool, err error) { + if err := validateIngressClass(ctx, r.Client); err != nil { logger.Infof("error validating tailscale IngressClass: %v.", err) - return nil + return false, nil } - // Get and validate ProxyGroup readiness pgName := ing.Annotations[AnnotationProxyGroup] if pgName == "" { logger.Infof("[unexpected] no ProxyGroup annotation, skipping VIPService provisioning") - return nil + return false, nil } logger = logger.With("ProxyGroup", pgName) pg := &tsapi.ProxyGroup{} - if err := a.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { + if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { if apierrors.IsNotFound(err) { logger.Infof("ProxyGroup %q does not exist", pgName) - return nil + return false, nil } - return fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) + return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } if !tsoperator.ProxyGroupIsReady(pg) { - // TODO(irbekrm): we need to reconcile ProxyGroup Ingresses on ProxyGroup changes 
to not miss the status update - // in this case. - logger.Infof("ProxyGroup %q is not ready", pgName) - return nil + logger.Infof("ProxyGroup %q is not (yet) ready", pgName) + return false, nil } // Validate Ingress configuration - if err := a.validateIngress(ing, pg); err != nil { + if err := r.validateIngress(ctx, ing, pg); err != nil { logger.Infof("invalid Ingress configuration: %v", err) - a.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error()) - return nil + r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidIngressConfiguration", err.Error()) + return false, nil } - if !IsHTTPSEnabledOnTailnet(a.tsnetServer) { - a.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") + if !IsHTTPSEnabledOnTailnet(r.tsnetServer) { + r.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } + logger = logger.With("proxy-group", pg.Name) + if !slices.Contains(ing.Finalizers, FinalizerNamePG) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -157,64 +184,78 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin // multi-reconcile operation is underway. logger.Infof("exposing Ingress over tailscale") ing.Finalizers = append(ing.Finalizers, FinalizerNamePG) - if err := a.Update(ctx, ing); err != nil { - return fmt.Errorf("failed to add finalizer: %w", err) + if err := r.Update(ctx, ing); err != nil { + return false, fmt.Errorf("failed to add finalizer: %w", err) } - a.mu.Lock() - a.managedIngresses.Add(ing.UID) - gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) - a.mu.Unlock() + r.mu.Lock() + r.managedIngresses.Add(ing.UID) + gaugePGIngressResources.Set(int64(r.managedIngresses.Len())) + r.mu.Unlock() } - // 1. 
Ensure that if Ingress' hostname has changed, any VIPService resources corresponding to the old hostname - // are cleaned up. - // In practice, this function will ensure that any VIPServices that are associated with the provided ProxyGroup - // and no longer owned by an Ingress are cleaned up. This is fine- it is not expensive and ensures that in edge - // cases (a single update changed both hostname and removed ProxyGroup annotation) the VIPService is more likely - // to be (eventually) removed. - if err := a.maybeCleanupProxyGroup(ctx, pgName, logger); err != nil { - return fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) - } - - // 2. Ensure that there isn't a VIPService with the same hostname already created and not owned by this Ingress. - // TODO(irbekrm): perhaps in future we could have record names being stored on VIPServices. I am not certain if - // there might not be edge cases (custom domains, etc?) where attempting to determine the DNS name of the - // VIPService in this way won't be incorrect. - tcd, err := a.tailnetCertDomain(ctx) + // 1. Ensure that if Ingress' hostname has changed, any VIPService + // resources corresponding to the old hostname are cleaned up. + // In practice, this function will ensure that any VIPServices that are + // associated with the provided ProxyGroup and no longer owned by an + // Ingress are cleaned up. This is fine- it is not expensive and ensures + // that in edge cases (a single update changed both hostname and removed + // ProxyGroup annotation) the VIPService is more likely to be + // (eventually) removed. + svcsChanged, err = r.maybeCleanupProxyGroup(ctx, pgName, logger) if err != nil { - return fmt.Errorf("error determining DNS name base: %w", err) + return false, fmt.Errorf("failed to cleanup VIPService resources for ProxyGroup: %w", err) + } + + // 2. Ensure that there isn't a VIPService with the same hostname + // already created and not owned by this Ingress. 
+ // TODO(irbekrm): perhaps in future we could have record names being + // stored on VIPServices. I am not certain if there might not be edge + // cases (custom domains, etc?) where attempting to determine the DNS + // name of the VIPService in this way won't be incorrect. + tcd, err := r.tailnetCertDomain(ctx) + if err != nil { + return false, fmt.Errorf("error determining DNS name base: %w", err) } dnsName := hostname + "." + tcd serviceName := tailcfg.ServiceName("svc:" + hostname) - existingVIPSvc, err := a.tsClient.GetVIPService(ctx, serviceName) - // TODO(irbekrm): here and when creating the VIPService, verify if the error is not terminal (and therefore - // should not be reconciled). For example, if the hostname is already a hostname of a Tailscale node, the GET - // here will fail. + existingVIPSvc, err := r.tsClient.GetVIPService(ctx, serviceName) + // TODO(irbekrm): here and when creating the VIPService, verify if the + // error is not terminal (and therefore should not be reconciled). For + // example, if the hostname is already a hostname of a Tailscale node, + // the GET here will fail. if err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status != http.StatusNotFound { - return fmt.Errorf("error getting VIPService %q: %w", hostname, err) + return false, fmt.Errorf("error getting VIPService %q: %w", hostname, err) } } - if existingVIPSvc != nil && !isVIPServiceForIngress(existingVIPSvc, ing) { - logger.Infof("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. Please delete it manually and recreate this Ingress to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName) - a.recorder.Event(ing, corev1.EventTypeWarning, "ConflictingVIPServiceExists", fmt.Sprintf("VIPService %q for MagicDNS name %q already exists, but is not owned by this Ingress. 
Please delete it manually to proceed or create an Ingress for a different MagicDNS name", hostname, dnsName)) - return nil + // Generate the VIPService comment for new or existing VIPService. This + // checks and ensures that VIPService's owner references are updated for + // this Ingress and errors if that is not possible (i.e. because it + // appears that the VIPService has been created by a non-operator + // actor). + svcComment, err := r.ownerRefsComment(existingVIPSvc) + if err != nil { + const instr = "To proceed, you can either manually delete the existing VIPService or choose a different MagicDNS name at `.spec.tls.hosts[0] in the Ingress definition" + msg := fmt.Sprintf("error ensuring ownership of VIPService %s: %v. %s", hostname, err, instr) + logger.Warn(msg) + r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidVIPService", msg) + return false, nil } - // 3. Ensure that the serve config for the ProxyGroup contains the VIPService - cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) + // 3. Ensure that the serve config for the ProxyGroup contains the VIPService. + cm, cfg, err := r.proxyGroupServeConfig(ctx, pgName) if err != nil { - return fmt.Errorf("error getting Ingress serve config: %w", err) + return false, fmt.Errorf("error getting Ingress serve config: %w", err) } if cm == nil { logger.Infof("no Ingress serve config ConfigMap found, unable to update serve config. 
Ensure that ProxyGroup is healthy.") - return nil + return svcsChanged, nil } ep := ipn.HostPort(fmt.Sprintf("%s:443", dnsName)) - handlers, err := handlersForIngress(ctx, ing, a.Client, a.recorder, dnsName, logger) + handlers, err := handlersForIngress(ctx, ing, r.Client, r.recorder, dnsName, logger) if err != nil { - return fmt.Errorf("failed to get handlers for Ingress: %w", err) + return false, fmt.Errorf("failed to get handlers for Ingress: %w", err) } ingCfg := &ipn.ServiceConfig{ TCP: map[uint16]*ipn.TCPPortHandler{ @@ -250,16 +291,16 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin mak.Set(&cfg.Services, serviceName, ingCfg) cfgBytes, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("error marshaling serve config: %w", err) + return false, fmt.Errorf("error marshaling serve config: %w", err) } mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) - if err := a.Update(ctx, cm); err != nil { - return fmt.Errorf("error updating serve config: %w", err) + if err := r.Update(ctx, cm); err != nil { + return false, fmt.Errorf("error updating serve config: %w", err) } } // 4. Ensure that the VIPService exists and is up to date. - tags := a.defaultTags + tags := r.defaultTags if tstr, ok := ing.Annotations[AnnotationTags]; ok { tags = strings.Split(tstr, ",") } @@ -273,27 +314,32 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin Name: serviceName, Tags: tags, Ports: vipPorts, - Comment: fmt.Sprintf(VIPSvcOwnerRef, ing.UID), + Comment: svcComment, } if existingVIPSvc != nil { vipSvc.Addrs = existingVIPSvc.Addrs } + // TODO(irbekrm): right now if two Ingress resources attempt to apply different VIPService configs (different + // tags, or HTTP endpoint settings) we can end up reconciling those in a loop. We should detect when an Ingress + // with the same generation number has been reconciled ~more than N times and stop attempting to apply updates. 
if existingVIPSvc == nil || !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || - !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) { + !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) || + !strings.EqualFold(vipSvc.Comment, existingVIPSvc.Comment) { logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) - if err := a.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { - logger.Infof("error creating VIPService: %v", err) - return fmt.Errorf("error creating VIPService: %w", err) + if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { + return false, fmt.Errorf("error creating VIPService: %w", err) } } // 5. Update tailscaled's AdvertiseServices config, which should add the VIPService // IPs to the ProxyGroup Pods' AllowedIPs in the next netmap update if approved. - if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { - return fmt.Errorf("failed to update tailscaled config: %w", err) + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg.Name, serviceName, true, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config: %w", err) } + // TODO(irbekrm): check that the replicas are ready to route traffic for the VIPService before updating Ingress + // status. // 6. 
Update Ingress status oldStatus := ing.Status.DeepCopy() ports := []networkingv1.IngressPortStatus{ @@ -315,30 +361,29 @@ func (a *IngressPGReconciler) maybeProvision(ctx context.Context, hostname strin }, } if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) { - return nil + return svcsChanged, nil } - if err := a.Status().Update(ctx, ing); err != nil { - return fmt.Errorf("failed to update Ingress status: %w", err) + if err := r.Status().Update(ctx, ing); err != nil { + return false, fmt.Errorf("failed to update Ingress status: %w", err) } - return nil + return svcsChanged, nil } -// maybeCleanupProxyGroup ensures that if an Ingress hostname has changed, any VIPService resources created for the -// Ingress' ProxyGroup corresponding to the old hostname are cleaned up. A run of this function will ensure that any -// VIPServices that are associated with the provided ProxyGroup and no longer owned by an Ingress are cleaned up. -func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName string, logger *zap.SugaredLogger) error { +// VIPServices that are associated with the provided ProxyGroup and no longer managed this operator's instance are deleted, if not owned by other operator instances, else the owner reference is cleaned up. +// Returns true if the operation resulted in existing VIPService updates (owner reference removal). 
+func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyGroupName string, logger *zap.SugaredLogger) (svcsChanged bool, err error) { // Get serve config for the ProxyGroup - cm, cfg, err := a.proxyGroupServeConfig(ctx, pgName) + cm, cfg, err := r.proxyGroupServeConfig(ctx, proxyGroupName) if err != nil { - return fmt.Errorf("getting serve config: %w", err) + return false, fmt.Errorf("getting serve config: %w", err) } if cfg == nil { - return nil // ProxyGroup does not have any VIPServices + return false, nil // ProxyGroup does not have any VIPServices } ingList := &networkingv1.IngressList{} - if err := a.List(ctx, ingList); err != nil { - return fmt.Errorf("listing Ingresses: %w", err) + if err := r.List(ctx, ingList); err != nil { + return false, fmt.Errorf("listing Ingresses: %w", err) } serveConfigChanged := false // For each VIPService in serve config... @@ -357,26 +402,21 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) // Delete the VIPService from control if necessary. 
- svc, err := a.tsClient.GetVIPService(ctx, vipServiceName) - if err != nil { - errResp := &tailscale.ErrResponse{} - if ok := errors.As(err, errResp); !ok || errResp.Status != http.StatusNotFound { - return err - } - } + svc, _ := r.tsClient.GetVIPService(ctx, vipServiceName) if svc != nil && isVIPServiceForAnyIngress(svc) { logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) - if err := a.tsClient.DeleteVIPService(ctx, vipServiceName); err != nil { + svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) + if err != nil { errResp := &tailscale.ErrResponse{} if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { - return fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) + return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) } } } // Make sure the VIPService is not advertised in tailscaled or serve config. - if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pgName, vipServiceName, false, logger); err != nil { - return fmt.Errorf("failed to update tailscaled config services: %w", err) + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, false, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } delete(cfg.Services, vipServiceName) serveConfigChanged = true @@ -386,55 +426,67 @@ func (a *IngressPGReconciler) maybeCleanupProxyGroup(ctx context.Context, pgName if serveConfigChanged { cfgBytes, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("marshaling serve config: %w", err) + return false, fmt.Errorf("marshaling serve config: %w", err) } mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) - if err := a.Update(ctx, cm); err != nil { - return fmt.Errorf("updating serve config: %w", err) + if err := r.Update(ctx, cm); err != nil { + return false, fmt.Errorf("updating serve config: %w", err) } } - - return nil + return svcsChanged, nil } // maybeCleanup ensures that any resources, such as a 
VIPService created for this Ingress, are cleaned up when the -// Ingress is being deleted or is unexposed. -func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { +// Ingress is being deleted or is unexposed. The cleanup is safe for a multi-cluster setup- the VIPService is only +// deleted if it does not contain any other owner references. If it does the cleanup only removes the owner reference +// corresponding to this Ingress. +func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, ing *networkingv1.Ingress, logger *zap.SugaredLogger) (svcChanged bool, err error) { logger.Debugf("Ensuring any resources for Ingress are cleaned up") ix := slices.Index(ing.Finalizers, FinalizerNamePG) if ix < 0 { logger.Debugf("no finalizer, nothing to do") - a.mu.Lock() - defer a.mu.Unlock() - a.managedIngresses.Remove(ing.UID) - gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) - return nil - } - - // 1. Check if there is a VIPService created for this Ingress. - pg := ing.Annotations[AnnotationProxyGroup] - cm, cfg, err := a.proxyGroupServeConfig(ctx, pg) - if err != nil { - return fmt.Errorf("error getting ProxyGroup serve config: %w", err) - } - serviceName := tailcfg.ServiceName("svc:" + hostname) - // VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not - // found in the serve config, we can assume that there is no VIPService. TODO(irbekrm): once we have ingress - // ProxyGroup, we will probably add currently exposed VIPServices to its status. At that point, we can use the - // status rather than checking the serve config each time. - if cfg == nil || cfg.Services == nil || cfg.Services[serviceName] == nil { - return nil + return false, nil } logger.Infof("Ensuring that VIPService %q configuration is cleaned up", hostname) - // 2. Delete the VIPService. 
- if err := a.deleteVIPServiceIfExists(ctx, serviceName, ing, logger); err != nil { - return fmt.Errorf("error deleting VIPService: %w", err) + // Ensure that if cleanup succeeded Ingress finalizers are removed. + defer func() { + if err != nil { + return + } + if e := r.deleteFinalizer(ctx, ing, logger); err != nil { + err = errors.Join(err, e) + } + }() + + // 1. Check if there is a VIPService associated with this Ingress. + pg := ing.Annotations[AnnotationProxyGroup] + cm, cfg, err := r.proxyGroupServeConfig(ctx, pg) + if err != nil { + return false, fmt.Errorf("error getting ProxyGroup serve config: %w", err) + } + serviceName := tailcfg.ServiceName("svc:" + hostname) + + // VIPService is always first added to serve config and only then created in the Tailscale API, so if it is not + // found in the serve config, we can assume that there is no VIPService. (If the serve config does not exist at + // all, it is possible that the ProxyGroup has been deleted before cleaning up the Ingress, so carry on with + // cleanup). + if cfg != nil && cfg.Services != nil && cfg.Services[serviceName] == nil { + return false, nil + } + + // 2. Clean up the VIPService resources. + svcChanged, err = r.cleanupVIPService(ctx, serviceName, logger) + if err != nil { + return false, fmt.Errorf("error deleting VIPService: %w", err) + } + if cfg == nil || cfg.Services == nil { // user probably deleted the ProxyGroup + return svcChanged, nil } // 3. Unadvertise the VIPService in tailscaled config. - if err = a.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { - return fmt.Errorf("failed to update tailscaled config services: %w", err) + if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { + return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } // 4. Remove the VIPService from the serve config for the ProxyGroup. 
@@ -442,24 +494,13 @@ func (a *IngressPGReconciler) maybeCleanup(ctx context.Context, hostname string, delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) if err != nil { - return fmt.Errorf("error marshaling serve config: %w", err) + return false, fmt.Errorf("error marshaling serve config: %w", err) } mak.Set(&cm.BinaryData, serveConfigKey, cfgBytes) - if err := a.Update(ctx, cm); err != nil { - return fmt.Errorf("error updating ConfigMap %q: %w", cm.Name, err) - } - - if err := a.deleteFinalizer(ctx, ing, logger); err != nil { - return fmt.Errorf("failed to remove finalizer: %w", err) - } - a.mu.Lock() - defer a.mu.Unlock() - a.managedIngresses.Remove(ing.UID) - gaugePGIngressResources.Set(int64(a.managedIngresses.Len())) - return nil + return svcChanged, r.Update(ctx, cm) } -func (a *IngressPGReconciler) deleteFinalizer(ctx context.Context, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { +func (r *HAIngressReconciler) deleteFinalizer(ctx context.Context, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { found := false ing.Finalizers = slices.DeleteFunc(ing.Finalizers, func(f string) bool { found = true @@ -470,9 +511,13 @@ func (a *IngressPGReconciler) deleteFinalizer(ctx context.Context, ing *networki } logger.Debug("ensure %q finalizer is removed", FinalizerNamePG) - if err := a.Update(ctx, ing); err != nil { + if err := r.Update(ctx, ing); err != nil { return fmt.Errorf("failed to remove finalizer %q: %w", FinalizerNamePG, err) } + r.mu.Lock() + defer r.mu.Unlock() + r.managedIngresses.Remove(ing.UID) + gaugePGIngressResources.Set(int64(r.managedIngresses.Len())) return nil } @@ -480,15 +525,15 @@ func pgIngressCMName(pg string) string { return fmt.Sprintf("%s-ingress-config", pg) } -func (a *IngressPGReconciler) proxyGroupServeConfig(ctx context.Context, pg string) (cm *corev1.ConfigMap, cfg *ipn.ServeConfig, err error) { +func (r *HAIngressReconciler) proxyGroupServeConfig(ctx context.Context, pg string) (cm 
*corev1.ConfigMap, cfg *ipn.ServeConfig, err error) { name := pgIngressCMName(pg) cm = &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: a.tsNamespace, + Namespace: r.tsNamespace, }, } - if err := a.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil && !apierrors.IsNotFound(err) { + if err := r.Get(ctx, client.ObjectKeyFromObject(cm), cm); err != nil && !apierrors.IsNotFound(err) { return nil, nil, fmt.Errorf("error retrieving ingress serve config ConfigMap %s: %v", name, err) } if apierrors.IsNotFound(err) { @@ -508,16 +553,16 @@ type localClient interface { } // tailnetCertDomain returns the base domain (TCD) of the current tailnet. -func (a *IngressPGReconciler) tailnetCertDomain(ctx context.Context) (string, error) { - st, err := a.lc.StatusWithoutPeers(ctx) +func (r *HAIngressReconciler) tailnetCertDomain(ctx context.Context) (string, error) { + st, err := r.lc.StatusWithoutPeers(ctx) if err != nil { return "", fmt.Errorf("error getting tailscale status: %w", err) } return st.CurrentTailnet.MagicDNSSuffix, nil } -// shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup) -func shouldExpose(ing *networkingv1.Ingress) bool { +// shouldExpose returns true if the Ingress should be exposed over Tailscale in HA mode (on a ProxyGroup). 
+func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { isTSIngress := ing != nil && ing.Spec.IngressClassName != nil && *ing.Spec.IngressClassName == tailscaleIngressClassName @@ -525,13 +570,6 @@ func shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func isVIPServiceForIngress(svc *tailscale.VIPService, ing *networkingv1.Ingress) bool { - if svc == nil || ing == nil { - return false - } - return strings.EqualFold(svc.Comment, fmt.Sprintf(VIPSvcOwnerRef, ing.UID)) -} - func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { if svc == nil { return false @@ -545,7 +583,7 @@ func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { // - The derived hostname is a valid DNS label // - The referenced ProxyGroup exists and is of type 'ingress' // - Ingress' TLS block is invalid -func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsapi.ProxyGroup) error { +func (r *HAIngressReconciler) validateIngress(ctx context.Context, ing *networkingv1.Ingress, pg *tsapi.ProxyGroup) error { var errs []error // Validate tags if present @@ -581,30 +619,66 @@ func (a *IngressPGReconciler) validateIngress(ing *networkingv1.Ingress, pg *tsa errs = append(errs, fmt.Errorf("ProxyGroup %q is not ready", pg.Name)) } + // It is invalid to have multiple Ingress resources for the same VIPService in one cluster. + ingList := &networkingv1.IngressList{} + if err := r.List(ctx, ingList); err != nil { + errs = append(errs, fmt.Errorf("[unexpected] error listing Ingresses: %w", err)) + return errors.Join(errs...) + } + for _, i := range ingList.Items { + if r.shouldExpose(&i) && hostnameForIngress(&i) == hostname && i.Name != ing.Name { + errs = append(errs, fmt.Errorf("found duplicate Ingress %q for hostname %q - multiple Ingresses for the same hostname in the same cluster are not allowed", i.Name, hostname)) + } + } return errors.Join(errs...) 
} -// deleteVIPServiceIfExists attempts to delete the VIPService if it exists and is owned by the given Ingress. -func (a *IngressPGReconciler) deleteVIPServiceIfExists(ctx context.Context, name tailcfg.ServiceName, ing *networkingv1.Ingress, logger *zap.SugaredLogger) error { - svc, err := a.tsClient.GetVIPService(ctx, name) +// cleanupVIPService deletes any VIPService by the provided name if it is not owned by operator instances other than this one. +// If a VIPService is found, but contains other owner references, only removes this operator's owner reference. +// If a VIPService by the given name is not found or does not contain this operator's owner reference, do nothing. +// It returns true if an existing VIPService was updated to remove owner reference, as well as any error that occurred. +func (r *HAIngressReconciler) cleanupVIPService(ctx context.Context, name tailcfg.ServiceName, logger *zap.SugaredLogger) (updated bool, _ error) { + svc, err := r.tsClient.GetVIPService(ctx, name) if err != nil { errResp := &tailscale.ErrResponse{} if ok := errors.As(err, errResp); ok && errResp.Status == http.StatusNotFound { - return nil + return false, nil } - return fmt.Errorf("error getting VIPService: %w", err) + return false, fmt.Errorf("error getting VIPService: %w", err) } - - if !isVIPServiceForIngress(svc, ing) { - return nil + if svc == nil { + return false, nil } - + c, err := parseComment(svc) + if err != nil { + return false, fmt.Errorf("error parsing VIPService comment") + } + if c == nil || len(c.OwnerRefs) == 0 { + return false, nil + } + // Comparing with the operatorID only means that we will not be able to + // clean up VIPServices in cases where the operator was deleted from the + // cluster before deleting the Ingress. Perhaps the comparison could be + // 'if or.OperatorID === r.operatorID || or.ingressUID == r.ingressUID'. 
+ ix := slices.IndexFunc(c.OwnerRefs, func(or OwnerRef) bool { + return or.OperatorID == r.operatorID + }) + if ix == -1 { + return false, nil + } + if len(c.OwnerRefs) == 1 { + logger.Infof("Deleting VIPService %q", name) + return false, r.tsClient.DeleteVIPService(ctx, name) + } + c.OwnerRefs = slices.Delete(c.OwnerRefs, ix, ix+1) logger.Infof("Deleting VIPService %q", name) - if err = a.tsClient.DeleteVIPService(ctx, name); err != nil { - return fmt.Errorf("error deleting VIPService: %w", err) + json, err := json.Marshal(c) + if err != nil { + return false, fmt.Errorf("error marshalling updated VIPService owner reference: %w", err) } - return nil + svc.Comment = string(json) + return true, r.tsClient.CreateOrUpdateVIPService(ctx, svc) } // isHTTPEndpointEnabled returns true if the Ingress has been configured to expose an HTTP endpoint to tailnet. @@ -615,7 +689,7 @@ func isHTTPEndpointEnabled(ing *networkingv1.Ingress) bool { return ing.Annotations[annotationHTTPEndpoint] == "enabled" } -func (a *IngressPGReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { +func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Context, pgName string, serviceName tailcfg.ServiceName, shouldBeAdvertised bool, logger *zap.SugaredLogger) (err error) { logger.Debugf("Updating ProxyGroup tailscaled configs to advertise service %q: %v", serviceName, shouldBeAdvertised) // Get all config Secrets for this ProxyGroup. @@ -665,3 +739,75 @@ func (a *IngressPGReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } + +// OwnerRef is an owner reference that uniquely identifies a Tailscale +// Kubernetes operator instance. +type OwnerRef struct { + // OperatorID is the stable ID of the operator's Tailscale device. 
+	OperatorID string `json:"operatorID,omitempty"`
+}
+
+// comment is the content of the VIPService.Comment field.
+type comment struct {
+	// OwnerRefs is a list of owner references that identify all operator
+	// instances that manage this VIPService.
+	OwnerRefs []OwnerRef `json:"ownerRefs,omitempty"`
+}
+
+// ownerRefsComment returns a VIPService Comment that includes the owner reference for this
+// operator instance for the provided VIPService. If the VIPService is nil, a
+// new comment with owner ref is returned. If the VIPService is not nil, the
+// existing comment is returned with the owner reference added, if not already
+// present. If the VIPService is not nil, but does not contain a comment, we
+// return an error as this likely means that the VIPService was created by
+// something other than a Tailscale Kubernetes operator.
+func (r *HAIngressReconciler) ownerRefsComment(svc *tailscale.VIPService) (string, error) {
+	ref := OwnerRef{
+		OperatorID: r.operatorID,
+	}
+	if svc == nil {
+		c := &comment{OwnerRefs: []OwnerRef{ref}}
+		json, err := json.Marshal(c)
+		if err != nil {
+			return "", fmt.Errorf("[unexpected] unable to marshal VIPService comment contents: %w, please report this", err)
+		}
+		return string(json), nil
+	}
+	c, err := parseComment(svc)
+	if err != nil {
+		return "", fmt.Errorf("error parsing existing VIPService comment: %w", err)
+	}
+	if c == nil || len(c.OwnerRefs) == 0 {
+		return "", fmt.Errorf("VIPService %s exists, but does not contain Comment field with owner references- not proceeding as this is likely a resource created by something other than a Tailscale Kubernetes Operator", svc.Name)
+	}
+	if slices.Contains(c.OwnerRefs, ref) { // up to date
+		return svc.Comment, nil
+	}
+	c.OwnerRefs = append(c.OwnerRefs, ref)
+	json, err := json.Marshal(c)
+	if err != nil {
+		return "", fmt.Errorf("error marshalling updated owner references: %w", err)
+	}
+	return string(json), nil
+}
+
+// parseComment returns VIPService comment or nil if none 
found or not matching the expected format. +func parseComment(vipSvc *tailscale.VIPService) (*comment, error) { + if vipSvc.Comment == "" { + return nil, nil + } + c := &comment{} + if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { + return nil, fmt.Errorf("error parsing VIPService Comment field %q: %w", vipSvc.Comment, err) + } + return c, nil +} + +// requeueInterval returns a time duration between 5 and 10 minutes, which is +// the period of time after which an HA Ingress, whose VIPService has been newly +// created or changed, needs to be requeued. This is to protect against +// VIPService owner references being overwritten as a result of concurrent +// updates during multi-clutster Ingress create/update operations. +func requeueInterval() time.Duration { + return time.Duration(rand.N(5)+5) * time.Minute +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 8c4ffb691..7a995e169 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -23,6 +23,7 @@ import ( "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "tailscale.com/internal/client/tailscale" "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsoperator "tailscale.com/k8s-operator" @@ -190,6 +191,15 @@ func TestValidateIngress(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "test-ingress", Namespace: "default", + Annotations: map[string]string{ + AnnotationProxyGroup: "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test"}}, + }, }, } @@ -213,10 +223,11 @@ func TestValidateIngress(t *testing.T) { } tests := []struct { - name string - ing *networkingv1.Ingress - pg *tsapi.ProxyGroup - wantErr string + name string + ing *networkingv1.Ingress + pg *tsapi.ProxyGroup + existingIngs []networkingv1.Ingress + wantErr string }{ 
{ name: "valid_ingress_with_hostname", @@ -306,12 +317,38 @@ func TestValidateIngress(t *testing.T) { }, wantErr: "ProxyGroup \"test-pg\" is not ready", }, + { + name: "duplicate_hostname", + ing: baseIngress, + pg: readyProxyGroup, + existingIngs: []networkingv1.Ingress{{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-ingress", + Namespace: "default", + Annotations: map[string]string{ + AnnotationProxyGroup: "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"test"}}, + }, + }, + }}, + wantErr: `found duplicate Ingress "existing-ingress" for hostname "test" - multiple Ingresses for the same hostname in the same cluster are not allowed`, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - r := &IngressPGReconciler{} - err := r.validateIngress(tt.ing, tt.pg) + fc := fake.NewClientBuilder(). + WithScheme(tsapi.GlobalScheme). + WithObjects(tt.ing). + WithLists(&networkingv1.IngressList{Items: tt.existingIngs}). 
+ Build() + r := &HAIngressReconciler{Client: fc} + err := r.validateIngress(context.Background(), tt.ing, tt.pg) if (err == nil && tt.wantErr != "") || (err != nil && err.Error() != tt.wantErr) { t.Errorf("validateIngress() error = %v, wantErr %v", err, tt.wantErr) } @@ -493,8 +530,7 @@ func verifyTailscaledConfig(t *testing.T, fc client.Client, expectedServices []s }) } -func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeTSClient) { - t.Helper() +func setupIngressTest(t *testing.T) (*HAIngressReconciler, client.Client, *fakeTSClient) { tsIngressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{Name: "tailscale"}, @@ -552,9 +588,9 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT if err := fc.Status().Update(context.Background(), pg); err != nil { t.Fatal(err) } + fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} ft := &fakeTSClient{} - fakeTsnetServer := &fakeTSNetServer{certDomains: []string{"foo.com"}} zl, err := zap.NewDevelopment() if err != nil { t.Fatal(err) @@ -568,12 +604,12 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT }, } - ingPGR := &IngressPGReconciler{ + ingPGR := &HAIngressReconciler{ Client: fc, tsClient: ft, - tsnetServer: fakeTsnetServer, defaultTags: []string{"tag:k8s"}, tsNamespace: "operator-ns", + tsnetServer: fakeTsnetServer, logger: zl.Sugar(), recorder: record.NewFakeRecorder(10), lc: lc, @@ -581,3 +617,87 @@ func setupIngressTest(t *testing.T) (*IngressPGReconciler, client.Client, *fakeT return ingPGR, fc, ft } + +func TestIngressPGReconciler_MultiCluster(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + ingPGR.operatorID = "operator-1" + + // Create initial Ingress + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + 
Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc"}}, + }, + }, + } + mustCreate(t, fc, ing) + + // Simulate existing VIPService from another cluster + existingVIPSvc := &tailscale.VIPService{ + Name: "svc:my-svc", + Comment: `{"ownerrefs":[{"operatorID":"operator-2"}]}`, + } + ft.vipServices = map[tailcfg.ServiceName]*tailscale.VIPService{ + "svc:my-svc": existingVIPSvc, + } + + // Verify reconciliation adds our operator reference + expectReconciled(t, ingPGR, "default", "test-ingress") + + vipSvc, err := ft.GetVIPService(context.Background(), "svc:my-svc") + if err != nil { + t.Fatalf("getting VIPService: %v", err) + } + if vipSvc == nil { + t.Fatal("VIPService not found") + } + + c := &comment{} + if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { + t.Fatalf("parsing comment: %v", err) + } + + wantOwnerRefs := []OwnerRef{ + {OperatorID: "operator-2"}, + {OperatorID: "operator-1"}, + } + if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs) + } + + // Delete the Ingress and verify VIPService still exists with one owner ref + if err := fc.Delete(context.Background(), ing); err != nil { + t.Fatalf("deleting Ingress: %v", err) + } + expectRequeue(t, ingPGR, "default", "test-ingress") + + vipSvc, err = ft.GetVIPService(context.Background(), "svc:my-svc") + if err != nil { + t.Fatalf("getting VIPService after deletion: %v", err) + } + if vipSvc == nil { + t.Fatal("VIPService was incorrectly deleted") + } + + c = &comment{} + if err := json.Unmarshal([]byte(vipSvc.Comment), c); err != nil { + t.Fatalf("parsing comment after deletion: %v", err) + } + + wantOwnerRefs = []OwnerRef{ + {OperatorID: "operator-2"}, + } + if !reflect.DeepEqual(c.OwnerRefs, wantOwnerRefs) { + t.Errorf("incorrect owner refs after 
deletion\ngot: %+v\nwant: %+v", c.OwnerRefs, wantOwnerRefs) + } +} diff --git a/cmd/k8s-operator/ingress.go b/cmd/k8s-operator/ingress.go index 7cadaecc4..8c19a5e05 100644 --- a/cmd/k8s-operator/ingress.go +++ b/cmd/k8s-operator/ingress.go @@ -73,6 +73,7 @@ func (a *IngressReconciler) Reconcile(ctx context.Context, req reconcile.Request return reconcile.Result{}, fmt.Errorf("failed to get ing: %w", err) } if !ing.DeletionTimestamp.IsZero() || !a.shouldExpose(ing) { + // TODO(irbekrm): this message is confusing if the Ingress is an HA Ingress logger.Debugf("ingress is being deleted or should not be exposed, cleaning up") return reconcile.Result{}, a.maybeCleanup(ctx, logger, ing) } diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 37e37a96e..1dcd130fb 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -9,6 +9,7 @@ package main import ( "context" + "fmt" "net/http" "os" "regexp" @@ -39,6 +40,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "tailscale.com/client/local" "tailscale.com/client/tailscale" "tailscale.com/hostinfo" "tailscale.com/ipn" @@ -335,6 +337,10 @@ func runReconcilers(opts reconcilerOpts) { if err != nil { startlog.Fatalf("could not get local client: %v", err) } + id, err := id(context.Background(), lc) + if err != nil { + startlog.Fatalf("error determining stable ID of the operator's Tailscale device: %v", err) + } ingressProxyGroupFilter := handler.EnqueueRequestsFromMapFunc(ingressesFromIngressProxyGroup(mgr.GetClient(), opts.log)) err = builder. ControllerManagedBy(mgr). @@ -342,7 +348,7 @@ func runReconcilers(opts reconcilerOpts) { Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). 
- Complete(&IngressPGReconciler{ + Complete(&HAIngressReconciler{ recorder: eventRecorder, tsClient: opts.tsClient, tsnetServer: opts.tsServer, @@ -350,6 +356,7 @@ func runReconcilers(opts reconcilerOpts) { Client: mgr.GetClient(), logger: opts.log.Named("ingress-pg-reconciler"), lc: lc, + operatorID: id, tsNamespace: opts.tailscaleNamespace, }) if err != nil { @@ -1262,3 +1269,14 @@ func hasProxyGroupAnnotation(obj client.Object) bool { ing := obj.(*networkingv1.Ingress) return ing.Annotations[AnnotationProxyGroup] != "" } + +func id(ctx context.Context, lc *local.Client) (string, error) { + st, err := lc.StatusWithoutPeers(ctx) + if err != nil { + return "", fmt.Errorf("error getting tailscale status: %w", err) + } + if st.Self == nil { + return "", fmt.Errorf("unexpected: device's status does not contain node's metadata") + } + return string(st.Self.ID), nil +} From dd7166cb8e12261eafd43a06cd4ee31a7356d016 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 18:57:14 -0800 Subject: [PATCH 39/87] util/eventbus: add internal hook type for debugging Publicly exposed debugging functions will use these hooks to observe dataflow in the bus. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/debug.go | 62 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 util/eventbus/debug.go diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go new file mode 100644 index 000000000..912fe7623 --- /dev/null +++ b/util/eventbus/debug.go @@ -0,0 +1,62 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "slices" + "sync" + "sync/atomic" +) + +// A hook collects hook functions that can be run as a group. +type hook[T any] struct { + sync.Mutex + fns []hookFn[T] +} + +var hookID atomic.Uint64 + +// add registers fn to be called when the hook is run. Returns an +// unregistration function that removes fn from the hook when called. 
+// +//lint:ignore U1000 Not used yet, but will be in an upcoming change +func (h *hook[T]) add(fn func(T)) (remove func()) { + id := hookID.Add(1) + h.Lock() + defer h.Unlock() + h.fns = append(h.fns, hookFn[T]{id, fn}) + return func() { h.remove(id) } +} + +// remove removes the function with the given ID from the hook. +// +//lint:ignore U1000 Not used yet, but will be in an upcoming change +func (h *hook[T]) remove(id uint64) { + h.Lock() + defer h.Unlock() + h.fns = slices.DeleteFunc(h.fns, func(f hookFn[T]) bool { return f.ID == id }) +} + +// active reports whether any functions are registered with the +// hook. This can be used to skip expensive work when the hook is +// inactive. +func (h *hook[T]) active() bool { + h.Lock() + defer h.Unlock() + return len(h.fns) > 0 +} + +// run calls all registered functions with the value v. +func (h *hook[T]) run(v T) { + h.Lock() + defer h.Unlock() + for _, fn := range h.fns { + fn.Fn(v) + } +} + +type hookFn[T any] struct { + ID uint64 + Fn func(T) +} From e80d2b4ad1e427c7700264a05d4bc8a6d95e29d7 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Wed, 5 Mar 2025 19:37:03 -0800 Subject: [PATCH 40/87] util/eventbus: add debug hooks to snoop on bus traffic Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 45 ++++++++++++++++++++++++++++---------- util/eventbus/client.go | 5 +++-- util/eventbus/publish.go | 12 ++-------- util/eventbus/subscribe.go | 22 ++++++++++++++----- 4 files changed, 56 insertions(+), 28 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index b479f3940..a9b6f0dec 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -8,17 +8,28 @@ import ( "reflect" "slices" "sync" - "time" "tailscale.com/util/set" ) +type publishedEvent struct { + Event any + From *Client +} + +type routedEvent struct { + Event any + From *Client + To []*Client +} + // Bus is an event bus that distributes published events to interested // subscribers. 
type Bus struct { - router *worker - write chan publishedEvent - snapshot chan chan []publishedEvent + router *worker + write chan publishedEvent + snapshot chan chan []publishedEvent + routeDebug hook[routedEvent] topicsMu sync.Mutex // guards everything below. topics map[reflect.Type][]*subscribeState @@ -94,13 +105,23 @@ func (b *Bus) pump(ctx context.Context) { for !vals.Empty() { val := vals.Peek() dests := b.dest(reflect.ValueOf(val.Event).Type()) - routed := time.Now() + + if b.routeDebug.active() { + clients := make([]*Client, len(dests)) + for i := range len(dests) { + clients[i] = dests[i].client + } + b.routeDebug.run(routedEvent{ + Event: val.Event, + From: val.From, + To: clients, + }) + } + for _, d := range dests { evt := queuedEvent{ - Event: val.Event, - From: val.From, - Published: val.Published, - Routed: routed, + Event: val.Event, + From: val.From, } deliverOne: for { @@ -113,6 +134,7 @@ func (b *Bus) pump(ctx context.Context) { break deliverOne case in := <-acceptCh(): vals.Add(in) + in.From.publishDebug.run(in) case <-ctx.Done(): return case ch := <-b.snapshot: @@ -129,8 +151,9 @@ func (b *Bus) pump(ctx context.Context) { select { case <-ctx.Done(): return - case val := <-b.write: - vals.Add(val) + case in := <-b.write: + vals.Add(in) + in.From.publishDebug.run(in) case ch := <-b.snapshot: ch <- nil } diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 174cc5ea5..17f7e8608 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -17,8 +17,9 @@ import ( // Subscribers that share the same client receive events one at a // time, in the order they were published. 
type Client struct { - name string - bus *Bus + name string + bus *Bus + publishDebug hook[publishedEvent] mu sync.Mutex pub set.Set[publisher] diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index fdabdcb23..b228708ac 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -5,15 +5,8 @@ package eventbus import ( "reflect" - "time" ) -type publishedEvent struct { - Event any - From *Client - Published time.Time -} - // publisher is a uniformly typed wrapper around Publisher[T], so that // debugging facilities can look at active publishers. type publisher interface { @@ -60,9 +53,8 @@ func (p *Publisher[T]) Publish(v T) { } evt := publishedEvent{ - Event: v, - From: p.client, - Published: time.Now(), + Event: v, + From: p.client, } select { diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 71201aa40..c38949d9d 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -8,14 +8,17 @@ import ( "fmt" "reflect" "sync" - "time" ) +type deliveredEvent struct { + Event any + From *Client + To *Client +} + type queuedEvent struct { - Event any - From *Client - Published time.Time - Routed time.Time + Event any + From *Client } // subscriber is a uniformly typed wrapper around Subscriber[T], so @@ -46,6 +49,7 @@ type subscribeState struct { dispatcher *worker write chan queuedEvent snapshot chan chan []queuedEvent + debug hook[deliveredEvent] outputsMu sync.Mutex outputs map[reflect.Type]subscriber @@ -82,6 +86,14 @@ func (q *subscribeState) pump(ctx context.Context) { if !sub.dispatch(ctx, &vals, acceptCh) { return } + + if q.debug.active() { + q.debug.run(deliveredEvent{ + Event: val.Event, + From: val.From, + To: q.client, + }) + } } else { // Keep the cases in this select in sync with // Subscriber.dispatch below. 
The only different should be From 7fac0175c08565076f92b9ae4d2742dc8abda9af Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Tue, 4 Mar 2025 13:41:12 -0800 Subject: [PATCH 41/87] cmd/derper, derp/derphttp: support, generate self-signed IP address certs For people who can't use LetsEncrypt because it's banned. Per https://github.com/tailscale/tailscale/issues/11776#issuecomment-2520955317 This does two things: 1) if you run derper with --certmode=manual and --hostname=$IP_ADDRESS we previously permitted, but now we also: * auto-generate the self-signed cert for you if it doesn't yet exist on disk * print out the derpmap configuration you need to use that self-signed cert 2) teaches derp/derphttp's derp dialer to verify the signature of self-signed TLS certs, if so declared in the existing DERPNode.CertName field, which previously existed for domain fronting, separating out the dial hostname from how certs are validates, so it's not overloaded much; that's what it was meant for. Fixes #11776 Change-Id: Ie72d12f209416bb7e8325fe0838cd2c66342c5cf Signed-off-by: Brad Fitzpatrick --- cmd/derper/cert.go | 102 ++++++++++++++++++++++++++++++- cmd/derper/cert_test.go | 73 ++++++++++++++++++++++ derp/derphttp/derphttp_client.go | 20 +++++- net/tlsdial/tlsdial.go | 41 +++++++++++++ tailcfg/derpmap.go | 6 ++ 5 files changed, 238 insertions(+), 4 deletions(-) diff --git a/cmd/derper/cert.go b/cmd/derper/cert.go index 623fa376f..b95755c64 100644 --- a/cmd/derper/cert.go +++ b/cmd/derper/cert.go @@ -4,16 +4,28 @@ package main import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" "crypto/tls" "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "encoding/pem" "errors" "fmt" + "log" + "math/big" "net" "net/http" + "os" "path/filepath" "regexp" + "time" "golang.org/x/crypto/acme/autocert" + "tailscale.com/tailcfg" ) var unsafeHostnameCharacters = regexp.MustCompile(`[^a-zA-Z0-9-\.]`) @@ -65,8 +77,18 @@ func NewManualCertManager(certdir, hostname string) 
(certProvider, error) { crtPath := filepath.Join(certdir, keyname+".crt") keyPath := filepath.Join(certdir, keyname+".key") cert, err := tls.LoadX509KeyPair(crtPath, keyPath) + hostnameIP := net.ParseIP(hostname) // or nil if hostname isn't an IP address if err != nil { - return nil, fmt.Errorf("can not load x509 key pair for hostname %q: %w", keyname, err) + // If the hostname is an IP address, automatically create a + // self-signed certificate for it. + var certp *tls.Certificate + if os.IsNotExist(err) && hostnameIP != nil { + certp, err = createSelfSignedIPCert(crtPath, keyPath, hostname) + } + if err != nil { + return nil, fmt.Errorf("can not load x509 key pair for hostname %q: %w", keyname, err) + } + cert = *certp } // ensure hostname matches with the certificate x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) @@ -76,6 +98,18 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) { if err := x509Cert.VerifyHostname(hostname); err != nil { return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err) } + if hostnameIP != nil { + // If the hostname is an IP address, print out information on how to + // confgure this in the derpmap. + dn := &tailcfg.DERPNode{ + Name: "custom", + RegionID: 900, + HostName: hostname, + CertName: fmt.Sprintf("sha256-raw:%-02x", sha256.Sum256(x509Cert.Raw)), + } + dnJSON, _ := json.Marshal(dn) + log.Printf("Using self-signed certificate for IP address %q. 
Configure it in DERPMap using: (https://tailscale.com/s/custom-derp)\n %s", hostname, dnJSON) + } return &manualCertManager{ cert: &cert, hostname: hostname, @@ -109,3 +143,69 @@ func (m *manualCertManager) getCertificate(hi *tls.ClientHelloInfo) (*tls.Certif func (m *manualCertManager) HTTPHandler(fallback http.Handler) http.Handler { return fallback } + +func createSelfSignedIPCert(crtPath, keyPath, ipStr string) (*tls.Certificate, error) { + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("invalid IP address: %s", ipStr) + } + + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, fmt.Errorf("failed to generate EC private key: %v", err) + } + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, fmt.Errorf("failed to generate serial number: %v", err) + } + + now := time.Now() + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: ipStr, + }, + NotBefore: now, + NotAfter: now.AddDate(1, 0, 0), // expires in 1 year; a bit over that is rejected by macOS etc + + KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + // Set the IP as a SAN. + template.IPAddresses = []net.IP{ip} + + // Create the self-signed certificate. 
+ derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return nil, fmt.Errorf("failed to create certificate: %v", err) + } + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + + keyBytes, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return nil, fmt.Errorf("unable to marshal EC private key: %v", err) + } + + keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}) + + if err := os.MkdirAll(filepath.Dir(crtPath), 0700); err != nil { + return nil, fmt.Errorf("failed to create directory for certificate: %v", err) + } + if err := os.WriteFile(crtPath, certPEM, 0644); err != nil { + return nil, fmt.Errorf("failed to write certificate to %s: %v", crtPath, err) + } + if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil { + return nil, fmt.Errorf("failed to write key to %s: %v", keyPath, err) + } + + tlsCert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + return nil, fmt.Errorf("failed to create tls.Certificate: %v", err) + } + return &tlsCert, nil +} diff --git a/cmd/derper/cert_test.go b/cmd/derper/cert_test.go index a379e5c04..2ec7b756e 100644 --- a/cmd/derper/cert_test.go +++ b/cmd/derper/cert_test.go @@ -4,19 +4,29 @@ package main import ( + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" + "crypto/sha256" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "fmt" "math/big" "net" + "net/http" "os" "path/filepath" "testing" "time" + + "tailscale.com/derp" + "tailscale.com/derp/derphttp" + "tailscale.com/net/netmon" + "tailscale.com/tailcfg" + "tailscale.com/types/key" ) // Verify that in --certmode=manual mode, we can use a bare IP address @@ -95,3 +105,66 @@ func TestCertIP(t *testing.T) { t.Fatalf("GetCertificate returned nil") } } + +// Test that we can dial a raw IP without using a hostname and without a WebPKI +// cert, validating the cert against the signature of the cert in the DERP map's +// 
DERPNode. +// +// See https://github.com/tailscale/tailscale/issues/11776. +func TestPinnedCertRawIP(t *testing.T) { + td := t.TempDir() + cp, err := NewManualCertManager(td, "127.0.0.1") + if err != nil { + t.Fatalf("NewManualCertManager: %v", err) + } + + cert, err := cp.TLSConfig().GetCertificate(&tls.ClientHelloInfo{ + ServerName: "127.0.0.1", + }) + if err != nil { + t.Fatalf("GetCertificate: %v", err) + } + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer ln.Close() + + ds := derp.NewServer(key.NewNode(), t.Logf) + + derpHandler := derphttp.Handler(ds) + mux := http.NewServeMux() + mux.Handle("/derp", derpHandler) + + var hs http.Server + hs.Handler = mux + hs.TLSConfig = cp.TLSConfig() + go hs.ServeTLS(ln, "", "") + + lnPort := ln.Addr().(*net.TCPAddr).Port + + reg := &tailcfg.DERPRegion{ + RegionID: 900, + Nodes: []*tailcfg.DERPNode{ + { + RegionID: 900, + HostName: "127.0.0.1", + CertName: fmt.Sprintf("sha256-raw:%-02x", sha256.Sum256(cert.Leaf.Raw)), + DERPPort: lnPort, + }, + }, + } + + netMon := netmon.NewStatic() + dc := derphttp.NewRegionClient(key.NewNode(), t.Logf, netMon, func() *tailcfg.DERPRegion { + return reg + }) + defer dc.Close() + + _, connClose, _, err := dc.DialRegionTLS(context.Background(), reg) + if err != nil { + t.Fatalf("DialRegionTLS: %v", err) + } + defer connClose.Close() +} diff --git a/derp/derphttp/derphttp_client.go b/derp/derphttp/derphttp_client.go index 7387b60b4..319c02429 100644 --- a/derp/derphttp/derphttp_client.go +++ b/derp/derphttp/derphttp_client.go @@ -652,7 +652,11 @@ func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { tlsConf.VerifyConnection = nil } if node.CertName != "" { - tlsdial.SetConfigExpectedCert(tlsConf, node.CertName) + if suf, ok := strings.CutPrefix(node.CertName, "sha256-raw:"); ok { + tlsdial.SetConfigExpectedCertHash(tlsConf, suf) + } else { + tlsdial.SetConfigExpectedCert(tlsConf, node.CertName) + } } } return 
tls.Client(nc, tlsConf) @@ -666,7 +670,7 @@ func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn { func (c *Client) DialRegionTLS(ctx context.Context, reg *tailcfg.DERPRegion) (tlsConn *tls.Conn, connClose io.Closer, node *tailcfg.DERPNode, err error) { tcpConn, node, err := c.dialRegion(ctx, reg) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, fmt.Errorf("dialRegion(%d): %w", reg.RegionID, err) } done := make(chan bool) // unbuffered defer close(done) @@ -741,6 +745,17 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e nwait := 0 startDial := func(dstPrimary, proto string) { + dst := cmp.Or(dstPrimary, n.HostName) + + // If dialing an IP address directly, check its address family + // and bail out before incrementing nwait. + if ip, err := netip.ParseAddr(dst); err == nil { + if proto == "tcp4" && ip.Is6() || + proto == "tcp6" && ip.Is4() { + return + } + } + nwait++ go func() { if proto == "tcp4" && c.preferIPv6() { @@ -755,7 +770,6 @@ func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, e // Start v4 dial } } - dst := cmp.Or(dstPrimary, n.HostName) port := "443" if !c.useHTTPS() { port = "3340" diff --git a/net/tlsdial/tlsdial.go b/net/tlsdial/tlsdial.go index 2af87bd02..4d22383ef 100644 --- a/net/tlsdial/tlsdial.go +++ b/net/tlsdial/tlsdial.go @@ -12,6 +12,7 @@ package tlsdial import ( "bytes" "context" + "crypto/sha256" "crypto/tls" "crypto/x509" "errors" @@ -246,6 +247,46 @@ func SetConfigExpectedCert(c *tls.Config, certDNSName string) { } } +// SetConfigExpectedCertHash configures c's VerifyPeerCertificate function +// to require that exactly 1 cert is presented, and that the hex of its SHA256 hash +// is equal to wantFullCertSHA256Hex and that it's a valid cert for c.ServerName. 
+func SetConfigExpectedCertHash(c *tls.Config, wantFullCertSHA256Hex string) { + if c.VerifyPeerCertificate != nil { + panic("refusing to override tls.Config.VerifyPeerCertificate") + } + // Set InsecureSkipVerify to prevent crypto/tls from doing its + // own cert verification, but do the same work that it'd do + // (but using certDNSName) in the VerifyPeerCertificate hook. + c.InsecureSkipVerify = true + c.VerifyConnection = nil + c.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error { + if len(rawCerts) == 0 { + return errors.New("no certs presented") + } + if len(rawCerts) > 1 { + return errors.New("unexpected multiple certs presented") + } + if fmt.Sprintf("%02x", sha256.Sum256(rawCerts[0])) != wantFullCertSHA256Hex { + return fmt.Errorf("cert hash does not match expected cert hash") + } + cert, err := x509.ParseCertificate(rawCerts[0]) + if err != nil { + return fmt.Errorf("ParseCertificate: %w", err) + } + if err := cert.VerifyHostname(c.ServerName); err != nil { + return fmt.Errorf("cert does not match server name %q: %w", c.ServerName, err) + } + now := time.Now() + if now.After(cert.NotAfter) { + return fmt.Errorf("cert expired %v", cert.NotAfter) + } + if now.Before(cert.NotBefore) { + return fmt.Errorf("cert not yet valid until %v; is your clock correct?", cert.NotBefore) + } + return nil + } +} + // NewTransport returns a new HTTP transport that verifies TLS certs using this // package, including its baked-in LetsEncrypt fallback roots. func NewTransport() *http.Transport { diff --git a/tailcfg/derpmap.go b/tailcfg/derpmap.go index 056152157..b3e54983f 100644 --- a/tailcfg/derpmap.go +++ b/tailcfg/derpmap.go @@ -139,6 +139,12 @@ type DERPNode struct { // name. If empty, HostName is used. If CertName is non-empty, // HostName is only used for the TCP dial (if IPv4/IPv6 are // not present) + TLS ClientHello. 
+ // + // As a special case, if CertName starts with "sha256-raw:", + // then the rest of the string is a hex-encoded SHA256 of the + // cert to expect. This is used for self-signed certs. + // In this case, the HostName field will typically be an IP + // address literal. CertName string `json:",omitempty"` // IPv4 optionally forces an IPv4 address to use, instead of using DNS. From 75a03fc71903b6e161af5ec2fb135df99e85bd23 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 7 Feb 2025 19:45:20 -0800 Subject: [PATCH 42/87] wgengine/magicsock: use learned DERP route as send path of last resort If we get a packet in over some DERP and don't otherwise know how to reply (no known DERP home or UDP endpoint), this makes us use the DERP connection on which we received the packet to reply. This will almost always be our own home DERP region. This is particularly useful for large one-way nodes (such as hello.ts.net) that don't actively reach out to other nodes, so don't need to be told the DERP home of peers. They can instead learn the DERP home upon getting the first connection. This can also help nodes from a slow or misbehaving control plane. 
Updates tailscale/corp#26438 Change-Id: I6241ec92828bf45982e0eb83ad5c7404df5968bc Signed-off-by: Brad Fitzpatrick --- control/controlclient/direct.go | 2 ++ control/controlclient/map.go | 3 +++ tstest/integration/nat/nat_test.go | 36 ++++++++++++++++++++++++++++++ wgengine/magicsock/derp.go | 24 ++++++++++++++++++-- wgengine/magicsock/endpoint.go | 10 ++++++++- 5 files changed, 72 insertions(+), 3 deletions(-) diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index 883a1a587..e7d1d25f8 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -1255,6 +1255,7 @@ type devKnobs struct { DumpNetMapsVerbose func() bool ForceProxyDNS func() bool StripEndpoints func() bool // strip endpoints from control (only use disco messages) + StripHomeDERP func() bool // strip Home DERP from control StripCaps func() bool // strip all local node's control-provided capabilities } @@ -1266,6 +1267,7 @@ func initDevKnob() devKnobs { DumpRegister: envknob.RegisterBool("TS_DEBUG_REGISTER"), ForceProxyDNS: envknob.RegisterBool("TS_DEBUG_PROXY_DNS"), StripEndpoints: envknob.RegisterBool("TS_DEBUG_STRIP_ENDPOINTS"), + StripHomeDERP: envknob.RegisterBool("TS_DEBUG_STRIP_HOME_DERP"), StripCaps: envknob.RegisterBool("TS_DEBUG_STRIP_CAPS"), } } diff --git a/control/controlclient/map.go b/control/controlclient/map.go index df2182c8b..769c8f1e3 100644 --- a/control/controlclient/map.go +++ b/control/controlclient/map.go @@ -240,6 +240,9 @@ func upgradeNode(n *tailcfg.Node) { } n.LegacyDERPString = "" } + if DevKnob.StripHomeDERP() { + n.HomeDERP = 0 + } if n.AllowedIPs == nil { n.AllowedIPs = slices.Clone(n.Addresses) diff --git a/tstest/integration/nat/nat_test.go b/tstest/integration/nat/nat_test.go index 9f77d31e9..15f126985 100644 --- a/tstest/integration/nat/nat_test.go +++ b/tstest/integration/nat/nat_test.go @@ -236,6 +236,22 @@ func hard(c *vnet.Config) *vnet.Node { fmt.Sprintf("10.0.%d.1/24", n), vnet.HardNAT)) } +func 
hardNoDERPOrEndoints(c *vnet.Config) *vnet.Node { + n := c.NumNodes() + 1 + return c.AddNode(c.AddNetwork( + fmt.Sprintf("2.%d.%d.%d", n, n, n), // public IP + fmt.Sprintf("10.0.%d.1/24", n), vnet.HardNAT), + vnet.TailscaledEnv{ + Key: "TS_DEBUG_STRIP_ENDPOINTS", + Value: "1", + }, + vnet.TailscaledEnv{ + Key: "TS_DEBUG_STRIP_HOME_DERP", + Value: "1", + }, + ) +} + func hardPMP(c *vnet.Config) *vnet.Node { n := c.NumNodes() + 1 return c.AddNode(c.AddNetwork( @@ -510,6 +526,26 @@ func TestEasyEasy(t *testing.T) { nt.want(routeDirect) } +// Issue tailscale/corp#26438: use learned DERP route as send path of last +// resort +// +// See (*magicsock.Conn).fallbackDERPRegionForPeer and its comment for +// background. +// +// This sets up a test with two nodes that must use DERP to communicate but the +// target of the ping (the second node) additionally is not getting DERP or +// Endpoint updates from the control plane. (Or rather, it's getting them but is +// configured to scrub them right when they come off the network before being +// processed) This then tests whether node2, upon receiving a packet, will be +// able to reply to node1 since it knows neither node1's endpoints nor its home +// DERP. The only reply route it can use is that fact that it just received a +// packet over a particular DERP from that peer. +func TestFallbackDERPRegionForPeer(t *testing.T) { + nt := newNatTest(t) + nt.runTest(hard, hardNoDERPOrEndoints) + nt.want(routeDERP) +} + func TestSingleJustIPv6(t *testing.T) { nt := newNatTest(t) nt.runTest(just6) diff --git a/wgengine/magicsock/derp.go b/wgengine/magicsock/derp.go index 7c8ffc01a..ffdff14a1 100644 --- a/wgengine/magicsock/derp.go +++ b/wgengine/magicsock/derp.go @@ -64,10 +64,30 @@ func (c *Conn) removeDerpPeerRoute(peer key.NodePublic, regionID int, dc *derpht // addDerpPeerRoute adds a DERP route entry, noting that peer was seen // on DERP node derpID, at least on the connection identified by dc. // See issue 150 for details. 
-func (c *Conn) addDerpPeerRoute(peer key.NodePublic, derpID int, dc *derphttp.Client) { +func (c *Conn) addDerpPeerRoute(peer key.NodePublic, regionID int, dc *derphttp.Client) { c.mu.Lock() defer c.mu.Unlock() - mak.Set(&c.derpRoute, peer, derpRoute{derpID, dc}) + mak.Set(&c.derpRoute, peer, derpRoute{regionID, dc}) +} + +// fallbackDERPRegionForPeer returns the DERP region ID we might be able to use +// to contact peer, learned from observing recent DERP traffic from them. +// +// This is used as a fallback when a peer receives a packet from a peer +// over DERP but doesn't known that peer's home DERP or any UDP endpoints. +// This is particularly useful for large one-way nodes (such as hello.ts.net) +// that don't actively reach out to other nodes, so don't need to be told +// the DERP home of peers. They can instead learn the DERP home upon getting the +// first connection. +// +// This can also help nodes from a slow or misbehaving control plane. +func (c *Conn) fallbackDERPRegionForPeer(peer key.NodePublic) (regionID int) { + c.mu.Lock() + defer c.mu.Unlock() + if dr, ok := c.derpRoute[peer]; ok { + return dr.regionID + } + return 0 } // activeDerp contains fields for an active DERP connection. diff --git a/wgengine/magicsock/endpoint.go b/wgengine/magicsock/endpoint.go index 7780c7db6..0c48acddf 100644 --- a/wgengine/magicsock/endpoint.go +++ b/wgengine/magicsock/endpoint.go @@ -948,7 +948,15 @@ func (de *endpoint) send(buffs [][]byte) error { de.mu.Unlock() if !udpAddr.IsValid() && !derpAddr.IsValid() { - return errNoUDPOrDERP + // Make a last ditch effort to see if we have a DERP route for them. If + // they contacted us over DERP and we don't know their UDP endpoints or + // their DERP home, we can at least assume they're reachable over the + // DERP they used to contact us. 
+ if rid := de.c.fallbackDERPRegionForPeer(de.publicKey); rid != 0 { + derpAddr = netip.AddrPortFrom(tailcfg.DerpMagicIPAddr, uint16(rid)) + } else { + return errNoUDPOrDERP + } } var err error if udpAddr.IsValid() { From a4b8c24834e4cd386d633a85d6df05de35c4d023 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Fri, 7 Mar 2025 12:50:15 -0500 Subject: [PATCH 43/87] ipn: sort VIP services before hashing (#15035) We're computing the list of services to hash by iterating over the values of a map, the ordering of which is not guaranteed. This can cause the hash to fluctuate depending on the ordering if there's more than one service hosted by the same host. Updates tailscale/corp#25733. Signed-off-by: Naman Sood --- ipn/ipnlocal/local.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index 1ce299371..e9f263996 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -8238,7 +8238,14 @@ func (b *LocalBackend) vipServicesFromPrefsLocked(prefs ipn.PrefsView) []*tailcf services[sn].Active = true } - return slicesx.MapValues(services) + servicesList := slicesx.MapValues(services) + // [slicesx.MapValues] provides the values in an indeterminate order, but since we'll + // be hashing a representation of this list later we want it to be in a consistent + // order. + slices.SortFunc(servicesList, func(a, b *tailcfg.VIPService) int { + return strings.Compare(a.Name.String(), b.Name.String()) + }) + return servicesList } var ( From 5177fd2ccb4bb39f38efc01673d75186b5030181 Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Thu, 6 Mar 2025 21:00:18 -0500 Subject: [PATCH 44/87] net/portmapper: retry UPnP when we get an "Invalid Args" We previously retried getting a UPnP mapping when the device returned error code 725, "OnlyPermanentLeasesSupported". However, we've seen devices in the wild also return 402, "Invalid Args", when given a lease duration. 
Fall back to the no-duration mapping method in these cases. Updates #15223 Signed-off-by: Andrew Dunham Change-Id: I6a25007c9eeac0dac83750dd3ae9bfcc287c8fcf --- net/portmapper/upnp.go | 5 +- net/portmapper/upnp_test.go | 107 ++++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 2 deletions(-) diff --git a/net/portmapper/upnp.go b/net/portmapper/upnp.go index f1199f0a6..134183135 100644 --- a/net/portmapper/upnp.go +++ b/net/portmapper/upnp.go @@ -610,8 +610,9 @@ func (c *Client) tryUPnPPortmapWithDevice( } // From the UPnP spec: http://upnp.org/specs/gw/UPnP-gw-WANIPConnection-v2-Service.pdf + // 402: Invalid Args (see: https://github.com/tailscale/tailscale/issues/15223) // 725: OnlyPermanentLeasesSupported - if ok && code == 725 { + if ok && (code == 402 || code == 725) { newPort, err = addAnyPortMapping( ctx, client, @@ -620,7 +621,7 @@ func (c *Client) tryUPnPPortmapWithDevice( internal.Addr().String(), 0, // permanent ) - c.vlogf("addAnyPortMapping: 725 retry %v, err=%q", newPort, err) + c.vlogf("addAnyPortMapping: errcode=%d retried: port=%v err=%v", code, newPort, err) } } if err != nil { diff --git a/net/portmapper/upnp_test.go b/net/portmapper/upnp_test.go index c41b535a5..0c296813f 100644 --- a/net/portmapper/upnp_test.go +++ b/net/portmapper/upnp_test.go @@ -628,6 +628,96 @@ func TestGetUPnPPortMapping(t *testing.T) { } } +func TestGetUPnPPortMapping_LeaseDuration(t *testing.T) { + testCases := []struct { + name string + resp string + }{ + {"only_permanent_leases", testAddPortMappingPermanentLease}, + {"invalid_args", testAddPortMappingPermanentLease_InvalidArgs}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + // This is a very basic fake UPnP server handler. + var sawRequestWithLease atomic.Bool + handlers := map[string]any{ + "AddPortMapping": func(body []byte) (int, string) { + // Decode a minimal body to determine whether we skip the request or not. 
+ var req struct { + Protocol string `xml:"NewProtocol"` + InternalPort string `xml:"NewInternalPort"` + ExternalPort string `xml:"NewExternalPort"` + InternalClient string `xml:"NewInternalClient"` + LeaseDuration string `xml:"NewLeaseDuration"` + } + if err := xml.Unmarshal(body, &req); err != nil { + t.Errorf("bad request: %v", err) + return http.StatusBadRequest, "bad request" + } + + if req.Protocol != "UDP" { + t.Errorf(`got Protocol=%q, want "UDP"`, req.Protocol) + } + if req.LeaseDuration != "0" { + // Return a fake error to ensure that we fall back to a permanent lease. + sawRequestWithLease.Store(true) + return http.StatusOK, tc.resp + } + + return http.StatusOK, testAddPortMappingResponse + }, + "GetExternalIPAddress": testGetExternalIPAddressResponse, + "GetStatusInfo": testGetStatusInfoResponse, + "DeletePortMapping": "", // Do nothing for test + } + + igd, err := NewTestIGD(t.Logf, TestIGDOptions{UPnP: true}) + if err != nil { + t.Fatal(err) + } + defer igd.Close() + + igd.SetUPnPHandler(&upnpServer{ + t: t, + Desc: testRootDesc, + Control: map[string]map[string]any{ + "/ctl/IPConn": handlers, + "/upnp/control/yomkmsnooi/wanipconn-1": handlers, + }, + }) + + ctx := context.Background() + c := newTestClient(t, igd) + c.debug.VerboseLogs = true + t.Logf("Listening on upnp=%v", c.testUPnPPort) + defer c.Close() + + // Actually test the UPnP port mapping. 
+ mustProbeUPnP(t, ctx, c) + + gw, myIP, ok := c.gatewayAndSelfIP() + if !ok { + t.Fatalf("could not get gateway and self IP") + } + t.Logf("gw=%v myIP=%v", gw, myIP) + + ext, ok := c.getUPnPPortMapping(ctx, gw, netip.AddrPortFrom(myIP, 12345), 0) + if !ok { + t.Fatal("could not get UPnP port mapping") + } + if got, want := ext.Addr(), netip.MustParseAddr("123.123.123.123"); got != want { + t.Errorf("bad external address; got %v want %v", got, want) + } + if !sawRequestWithLease.Load() { + t.Errorf("wanted request with lease, but didn't see one") + } + t.Logf("external IP: %v", ext) + }) + } +} + // TestGetUPnPPortMapping_NoValidServices tests that getUPnPPortMapping doesn't // crash when a valid UPnP response with no supported services is discovered // and parsed. @@ -1045,6 +1135,23 @@ const testAddPortMappingPermanentLease = ` ` +const testAddPortMappingPermanentLease_InvalidArgs = ` + + + + SOAP:Client + UPnPError + + + 402 + Invalid Args + + + + + +` + const testAddPortMappingResponse = ` From 5ce8cd5fecc1745a63b0ff4474182af3c50baeec Mon Sep 17 00:00:00 2001 From: Mario Minardi Date: Thu, 6 Mar 2025 22:10:22 -0700 Subject: [PATCH 45/87] .github/workflows: tidy go caches before uploading Delete files from `$(go env GOCACHE)` and `$(go env GOMODCACHE)/cache` that have not been modified in >= 90 minutes as these files are not resulting in cache hits on the current branch. These deltions have resulted in the uploaded / downloaded compressed cache size to go down to ~1/3 of the original size in some instances with the extracted size being ~1/4 of the original extraced size. 
Updates https://github.com/tailscale/tailscale/issues/15238 Signed-off-by: Mario Minardi --- .github/workflows/test.yml | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 4ff2f2421..87b8959ba 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -139,7 +139,11 @@ jobs: echo "Build/test created untracked files in the repo (file names above)." exit 1 fi - + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete windows: runs-on: windows-2022 steps: @@ -176,6 +180,11 @@ jobs: # Somewhere in the layers (powershell?) # the equals signs cause great confusion. run: go test ./... -bench . -benchtime 1x -run "^$" + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete privileged: runs-on: ubuntu-22.04 @@ -283,6 +292,11 @@ jobs: GOOS: ${{ matrix.goos }} GOARCH: ${{ matrix.goarch }} CGO_ENABLED: "0" + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete ios: # similar to cross above, but iOS can't build most of the repo. So, just #make it build a few smoke packages. 
@@ -342,6 +356,11 @@ jobs: GOARCH: ${{ matrix.goarch }} GOARM: ${{ matrix.goarm }} CGO_ENABLED: "0" + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete android: # similar to cross above, but android fails to build a few pieces of the @@ -394,6 +413,11 @@ jobs: run: | ./tool/go run ./cmd/tsconnect --fast-compression build ./tool/go run ./cmd/tsconnect --fast-compression build-pkg + - name: Tidy cache + shell: bash + run: | + find $(go env GOCACHE) -type f -mmin +90 -delete + find $(go env GOMODCACHE)/cache -type f -mmin +90 -delete tailscale_go: # Subset of tests that depend on our custom Go toolchain. runs-on: ubuntu-22.04 From 853abf86619d2994157012fec3cd123b64475d5f Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 6 Mar 2025 21:51:18 -0800 Subject: [PATCH 46/87] util/eventbus: initial debugging facilities for the event bus Enables monitoring events as they flow, listing bus clients, and snapshotting internal queues to troubleshoot stalls. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 63 +++++++++++++++-------- util/eventbus/client.go | 16 +++++- util/eventbus/debug.go | 103 +++++++++++++++++++++++++++++++++++-- util/eventbus/doc.go | 17 ++---- util/eventbus/publish.go | 2 +- util/eventbus/subscribe.go | 72 +++++++++++++++++--------- 6 files changed, 207 insertions(+), 66 deletions(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index a9b6f0dec..fc497add2 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -12,12 +12,12 @@ import ( "tailscale.com/util/set" ) -type publishedEvent struct { +type PublishedEvent struct { Event any From *Client } -type routedEvent struct { +type RoutedEvent struct { Event any From *Client To []*Client @@ -27,24 +27,25 @@ type routedEvent struct { // subscribers. 
type Bus struct { router *worker - write chan publishedEvent - snapshot chan chan []publishedEvent - routeDebug hook[routedEvent] + write chan PublishedEvent + snapshot chan chan []PublishedEvent + routeDebug hook[RoutedEvent] - topicsMu sync.Mutex // guards everything below. + topicsMu sync.Mutex topics map[reflect.Type][]*subscribeState // Used for introspection/debugging only, not in the normal event // publishing path. - clients set.Set[*Client] + clientsMu sync.Mutex + clients set.Set[*Client] } // New returns a new bus. Use [PublisherOf] to make event publishers, // and [Bus.Queue] and [Subscribe] to make event subscribers. func New() *Bus { ret := &Bus{ - write: make(chan publishedEvent), - snapshot: make(chan chan []publishedEvent), + write: make(chan PublishedEvent), + snapshot: make(chan chan []PublishedEvent), topics: map[reflect.Type][]*subscribeState{}, clients: set.Set[*Client]{}, } @@ -65,12 +66,17 @@ func (b *Bus) Client(name string) *Client { bus: b, pub: set.Set[publisher]{}, } - b.topicsMu.Lock() - defer b.topicsMu.Unlock() + b.clientsMu.Lock() + defer b.clientsMu.Unlock() b.clients.Add(ret) return ret } +// Debugger returns the debugging facility for the bus. +func (b *Bus) Debugger() Debugger { + return Debugger{b} +} + // Close closes the bus. Implicitly closes all clients, publishers and // subscribers attached to the bus. 
// @@ -79,19 +85,17 @@ func (b *Bus) Client(name string) *Client { func (b *Bus) Close() { b.router.StopAndWait() - var clients set.Set[*Client] - b.topicsMu.Lock() - clients, b.clients = b.clients, set.Set[*Client]{} - b.topicsMu.Unlock() - - for c := range clients { + b.clientsMu.Lock() + defer b.clientsMu.Unlock() + for c := range b.clients { c.Close() } + b.clients = nil } func (b *Bus) pump(ctx context.Context) { - var vals queue[publishedEvent] - acceptCh := func() chan publishedEvent { + var vals queue[PublishedEvent] + acceptCh := func() chan PublishedEvent { if vals.Full() { return nil } @@ -111,7 +115,7 @@ func (b *Bus) pump(ctx context.Context) { for i := range len(dests) { clients[i] = dests[i].client } - b.routeDebug.run(routedEvent{ + b.routeDebug.run(RoutedEvent{ Event: val.Event, From: val.From, To: clients, @@ -119,9 +123,10 @@ func (b *Bus) pump(ctx context.Context) { } for _, d := range dests { - evt := queuedEvent{ + evt := DeliveredEvent{ Event: val.Event, From: val.From, + To: d.client, } deliverOne: for { @@ -173,6 +178,22 @@ func (b *Bus) shouldPublish(t reflect.Type) bool { return len(b.topics[t]) > 0 } +func (b *Bus) listClients() []*Client { + b.clientsMu.Lock() + defer b.clientsMu.Unlock() + return b.clients.Slice() +} + +func (b *Bus) snapshotPublishQueue() []PublishedEvent { + resp := make(chan []PublishedEvent) + select { + case b.snapshot <- resp: + return <-resp + case <-b.router.Done(): + return nil + } +} + func (b *Bus) subscribe(t reflect.Type, q *subscribeState) (cancel func()) { b.topicsMu.Lock() defer b.topicsMu.Unlock() diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 17f7e8608..5cf7f97f5 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -19,13 +19,15 @@ import ( type Client struct { name string bus *Bus - publishDebug hook[publishedEvent] + publishDebug hook[PublishedEvent] mu sync.Mutex pub set.Set[publisher] sub *subscribeState // Lazily created on first subscribe } +func (c 
*Client) Name() string { return c.name } + // Close closes the client. Implicitly closes all publishers and // subscribers obtained from this client. func (c *Client) Close() { @@ -47,6 +49,16 @@ func (c *Client) Close() { } } +func (c *Client) snapshotSubscribeQueue() []DeliveredEvent { + return c.peekSubscribeState().snapshotQueue() +} + +func (c *Client) peekSubscribeState() *subscribeState { + c.mu.Lock() + defer c.mu.Unlock() + return c.sub +} + func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() @@ -76,7 +88,7 @@ func (c *Client) deleteSubscriber(t reflect.Type, s *subscribeState) { c.bus.unsubscribe(t, s) } -func (c *Client) publish() chan<- publishedEvent { +func (c *Client) publish() chan<- PublishedEvent { return c.bus.write } diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 912fe7623..d41fc0385 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -4,11 +4,110 @@ package eventbus import ( + "fmt" "slices" "sync" "sync/atomic" ) +// A Debugger offers access to a bus's privileged introspection and +// debugging facilities. +// +// The debugger's functionality is intended for humans and their tools +// to examine and troubleshoot bus clients, and should not be used in +// normal codepaths. +// +// In particular, the debugger provides access to information that is +// deliberately withheld from bus clients to encourage more robust and +// maintainable code - for example, the sender of an event, or the +// event streams of other clients. Please don't use the debugger to +// circumvent these restrictions for purposes other than debugging. +type Debugger struct { + bus *Bus +} + +// Clients returns a list of all clients attached to the bus. +func (d *Debugger) Clients() []*Client { + return d.bus.listClients() +} + +// PublishQueue returns the contents of the publish queue. 
+// +// The publish queue contains events that have been accepted by the +// bus from Publish() calls, but have not yet been routed to relevant +// subscribers. +// +// This queue is expected to be almost empty in normal operation. A +// full publish queue indicates that a slow subscriber downstream is +// causing backpressure and stalling the bus. +func (d *Debugger) PublishQueue() []PublishedEvent { + return d.bus.snapshotPublishQueue() +} + +// checkClient verifies that client is attached to the same bus as the +// Debugger, and panics if not. +func (d *Debugger) checkClient(client *Client) { + if client.bus != d.bus { + panic(fmt.Errorf("SubscribeQueue given client belonging to wrong bus")) + } +} + +// SubscribeQueue returns the contents of the given client's subscribe +// queue. +// +// The subscribe queue contains events that are to be delivered to the +// client, but haven't yet been handed off to the relevant +// [Subscriber]. +// +// This queue is expected to be almost empty in normal operation. A +// full subscribe queue indicates that the client is accepting events +// too slowly, and may be causing the rest of the bus to stall. +func (d *Debugger) SubscribeQueue(client *Client) []DeliveredEvent { + d.checkClient(client) + return client.snapshotSubscribeQueue() +} + +// WatchBus streams information about all events passing through the +// bus. +// +// Monitored events are delivered in the bus's global publication +// order (see "Concurrency properties" in the package docs). +// +// The caller must consume monitoring events promptly to avoid +// stalling the bus (see "Expected subscriber behavior" in the package +// docs). +func (d *Debugger) WatchBus() *Subscriber[RoutedEvent] { + return newMonitor(d.bus.routeDebug.add) +} + +// WatchPublish streams information about all events published by the +// given client. +// +// Monitored events are delivered in the bus's global publication +// order (see "Concurrency properties" in the package docs). 
+// +// The caller must consume monitoring events promptly to avoid +// stalling the bus (see "Expected subscriber behavior" in the package +// docs). +func (d *Debugger) WatchPublish(client *Client) *Subscriber[PublishedEvent] { + d.checkClient(client) + return newMonitor(client.publishDebug.add) +} + +// WatchSubscribe streams information about all events received by the +// given client. +// +// Monitored events are delivered in the bus's global publication +// order (see "Concurrency properties" in the package docs). +// +// The caller must consume monitoring events promptly to avoid +// stalling the bus (see "Expected subscriber behavior" in the package +// docs). +func (d *Debugger) WatchSubscribe(client *Client) *Subscriber[DeliveredEvent] { + d.checkClient(client) + return newMonitor(client.subscribeState().debug.add) +} + // A hook collects hook functions that can be run as a group. type hook[T any] struct { sync.Mutex @@ -19,8 +118,6 @@ var hookID atomic.Uint64 // add registers fn to be called when the hook is run. Returns an // unregistration function that removes fn from the hook when called. -// -//lint:ignore U1000 Not used yet, but will be in an upcoming change func (h *hook[T]) add(fn func(T)) (remove func()) { id := hookID.Add(1) h.Lock() @@ -30,8 +127,6 @@ func (h *hook[T]) add(fn func(T)) (remove func()) { } // remove removes the function with the given ID from the hook. -// -//lint:ignore U1000 Not used yet, but will be in an upcoming change func (h *hook[T]) remove(id uint64) { h.Lock() defer h.Unlock() diff --git a/util/eventbus/doc.go b/util/eventbus/doc.go index b3509b48b..964a686ea 100644 --- a/util/eventbus/doc.go +++ b/util/eventbus/doc.go @@ -86,18 +86,7 @@ // // # Debugging facilities // -// (TODO, not implemented yet, sorry, I promise we're working on it next!) -// -// The bus comes with introspection facilities to help reason about -// the state of the client, and diagnose issues such as slow -// subscribers. 
-// -// The bus provide a tsweb debugging page that shows the current state -// of the bus, including all publishers, subscribers, and queued -// events. -// -// The bus also has a snooping and tracing facility, which lets you -// observe all events flowing through the bus, along with their -// source, destination(s) and timing information such as the time of -// delivery to each subscriber and end-to-end bus delays. +// The [Debugger], obtained through [Bus.Debugger], provides +// introspection facilities to monitor events flowing through the bus, +// and inspect publisher and subscriber state. package eventbus diff --git a/util/eventbus/publish.go b/util/eventbus/publish.go index b228708ac..9897114b6 100644 --- a/util/eventbus/publish.go +++ b/util/eventbus/publish.go @@ -52,7 +52,7 @@ func (p *Publisher[T]) Publish(v T) { default: } - evt := publishedEvent{ + evt := PublishedEvent{ Event: v, From: p.client, } diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index c38949d9d..60e91edd5 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -10,17 +10,12 @@ import ( "sync" ) -type deliveredEvent struct { +type DeliveredEvent struct { Event any From *Client To *Client } -type queuedEvent struct { - Event any - From *Client -} - // subscriber is a uniformly typed wrapper around Subscriber[T], so // that debugging facilities can look at active subscribers. type subscriber interface { @@ -38,7 +33,7 @@ type subscriber interface { // processing other potential sources of wakeups, which is how we end // up at this awkward type signature and sharing of internal state // through dispatch. 
- dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool + dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool Close() } @@ -47,9 +42,9 @@ type subscribeState struct { client *Client dispatcher *worker - write chan queuedEvent - snapshot chan chan []queuedEvent - debug hook[deliveredEvent] + write chan DeliveredEvent + snapshot chan chan []DeliveredEvent + debug hook[DeliveredEvent] outputsMu sync.Mutex outputs map[reflect.Type]subscriber @@ -58,8 +53,8 @@ type subscribeState struct { func newSubscribeState(c *Client) *subscribeState { ret := &subscribeState{ client: c, - write: make(chan queuedEvent), - snapshot: make(chan chan []queuedEvent), + write: make(chan DeliveredEvent), + snapshot: make(chan chan []DeliveredEvent), outputs: map[reflect.Type]subscriber{}, } ret.dispatcher = runWorker(ret.pump) @@ -67,8 +62,8 @@ func newSubscribeState(c *Client) *subscribeState { } func (q *subscribeState) pump(ctx context.Context) { - var vals queue[queuedEvent] - acceptCh := func() chan queuedEvent { + var vals queue[DeliveredEvent] + acceptCh := func() chan DeliveredEvent { if vals.Full() { return nil } @@ -83,12 +78,12 @@ func (q *subscribeState) pump(ctx context.Context) { vals.Drop() continue } - if !sub.dispatch(ctx, &vals, acceptCh) { + if !sub.dispatch(ctx, &vals, acceptCh, q.snapshot) { return } if q.debug.active() { - q.debug.run(deliveredEvent{ + q.debug.run(DeliveredEvent{ Event: val.Event, From: val.From, To: q.client, @@ -111,6 +106,20 @@ func (q *subscribeState) pump(ctx context.Context) { } } +func (s *subscribeState) snapshotQueue() []DeliveredEvent { + if s == nil { + return nil + } + + resp := make(chan []DeliveredEvent) + select { + case s.snapshot <- resp: + return <-resp + case <-s.dispatcher.Done(): + return nil + } +} + func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { s.outputsMu.Lock() defer 
s.outputsMu.Unlock() @@ -154,28 +163,43 @@ func (s *subscribeState) closed() <-chan struct{} { // A Subscriber delivers one type of event from a [Client]. type Subscriber[T any] struct { - stop stopFlag - recv *subscribeState - read chan T + stop stopFlag + read chan T + unregister func() } func newSubscriber[T any](r *subscribeState) *Subscriber[T] { t := reflect.TypeFor[T]() ret := &Subscriber[T]{ - recv: r, - read: make(chan T), + read: make(chan T), + unregister: func() { r.deleteSubscriber(t) }, } r.addSubscriber(t, ret) return ret } +func newMonitor[T any](attach func(fn func(T)) (cancel func())) *Subscriber[T] { + ret := &Subscriber[T]{ + read: make(chan T, 100), // arbitrary, large + } + ret.unregister = attach(ret.monitor) + return ret +} + func (s *Subscriber[T]) subscribeType() reflect.Type { return reflect.TypeFor[T]() } -func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[queuedEvent], acceptCh func() chan queuedEvent) bool { +func (s *Subscriber[T]) monitor(debugEvent T) { + select { + case s.read <- debugEvent: + case <-s.stop.Done(): + } +} + +func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[DeliveredEvent], acceptCh func() chan DeliveredEvent, snapshot chan chan []DeliveredEvent) bool { t := vals.Peek().Event.(T) for { // Keep the cases in this select in sync with subscribeState.pump @@ -189,7 +213,7 @@ func (s *Subscriber[T]) dispatch(ctx context.Context, vals *queue[queuedEvent], vals.Add(val) case <-ctx.Done(): return false - case ch := <-s.recv.snapshot: + case ch := <-snapshot: ch <- vals.Snapshot() } } @@ -212,5 +236,5 @@ func (s *Subscriber[T]) Done() <-chan struct{} { // [Subscriber.Events] block for ever. 
func (s *Subscriber[T]) Close() { s.stop.Stop() // unblock receivers - s.recv.deleteSubscriber(reflect.TypeFor[T]()) + s.unregister() } From e71e95b841a1c37bafb69dd1fc355a5541a9bc65 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 13:01:35 -0800 Subject: [PATCH 47/87] util/eventbus: don't allow publishers to skip events while debugging If any debugging hook might see an event, Publisher.ShouldPublish should tell its caller to publish even if there are no ordinary subscribers. Updates #15160 Signed-off-by: David Anderson --- util/eventbus/bus.go | 4 ++++ util/eventbus/client.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index fc497add2..96cafc98b 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -173,6 +173,10 @@ func (b *Bus) dest(t reflect.Type) []*subscribeState { } func (b *Bus) shouldPublish(t reflect.Type) bool { + if b.routeDebug.active() { + return true + } + b.topicsMu.Lock() defer b.topicsMu.Unlock() return len(b.topics[t]) > 0 diff --git a/util/eventbus/client.go b/util/eventbus/client.go index 5cf7f97f5..a9ef40771 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -93,7 +93,7 @@ func (c *Client) publish() chan<- PublishedEvent { } func (c *Client) shouldPublish(t reflect.Type) bool { - return c.bus.shouldPublish(t) + return c.publishDebug.active() || c.bus.shouldPublish(t) } // Subscribe requests delivery of events of type T through the given From 346a35f6123cfa04104c283ff28050a75627a074 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 08:16:53 -0800 Subject: [PATCH 48/87] util/eventbus: add debugger methods to list pub/sub types This lets debug tools list the types that clients are wielding, so that they can build a dataflow graph and other debugging views. 
Updates #15160 Signed-off-by: David Anderson --- util/eventbus/client.go | 14 ++++++++++++++ util/eventbus/debug.go | 22 ++++++++++++++++++++++ util/eventbus/subscribe.go | 14 ++++++++++++++ 3 files changed, 50 insertions(+) diff --git a/util/eventbus/client.go b/util/eventbus/client.go index a9ef40771..a7a88c0a1 100644 --- a/util/eventbus/client.go +++ b/util/eventbus/client.go @@ -59,6 +59,20 @@ func (c *Client) peekSubscribeState() *subscribeState { return c.sub } +func (c *Client) publishTypes() []reflect.Type { + c.mu.Lock() + defer c.mu.Unlock() + ret := make([]reflect.Type, 0, len(c.pub)) + for pub := range c.pub { + ret = append(ret, pub.publishType()) + } + return ret +} + +func (c *Client) subscribeTypes() []reflect.Type { + return c.peekSubscribeState().subscribeTypes() +} + func (c *Client) subscribeState() *subscribeState { c.mu.Lock() defer c.mu.Unlock() diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index d41fc0385..31123e6ba 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -5,6 +5,7 @@ package eventbus import ( "fmt" + "reflect" "slices" "sync" "sync/atomic" @@ -108,6 +109,27 @@ func (d *Debugger) WatchSubscribe(client *Client) *Subscriber[DeliveredEvent] { return newMonitor(client.subscribeState().debug.add) } +// PublishTypes returns the list of types being published by client. +// +// The returned types are those for which the client has obtained a +// [Publisher]. The client may not have ever sent the type in +// question. +func (d *Debugger) PublishTypes(client *Client) []reflect.Type { + d.checkClient(client) + return client.publishTypes() +} + +// SubscribeTypes returns the list of types being subscribed to by +// client. +// +// The returned types are those for which the client has obtained a +// [Subscriber]. The client may not have ever received the type in +// question, and here may not be any publishers of the type. 
+func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { + d.checkClient(client) + return client.subscribeTypes() +} + // A hook collects hook functions that can be run as a group. type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/subscribe.go b/util/eventbus/subscribe.go index 60e91edd5..ba17e8548 100644 --- a/util/eventbus/subscribe.go +++ b/util/eventbus/subscribe.go @@ -120,6 +120,20 @@ func (s *subscribeState) snapshotQueue() []DeliveredEvent { } } +func (s *subscribeState) subscribeTypes() []reflect.Type { + if s == nil { + return nil + } + + s.outputsMu.Lock() + defer s.outputsMu.Unlock() + ret := make([]reflect.Type, 0, len(s.outputs)) + for t := range s.outputs { + ret = append(ret, t) + } + return ret +} + func (s *subscribeState) addSubscriber(t reflect.Type, sub subscriber) { s.outputsMu.Lock() defer s.outputsMu.Unlock() From eb3313e825c2d2e20f4c11bb7168ad72397e3d20 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Fri, 7 Mar 2025 17:12:07 -0700 Subject: [PATCH 49/87] tailcfg: add DERPRegion.NoMeasureNoHome, deprecate+document Avoid [cap 115] Fixes tailscale/corp#24697 Change-Id: Ib81994b5ded3dc87a1eef079eb268906a2acb3f8 Signed-off-by: Brad Fitzpatrick --- net/captivedetection/endpoints.go | 2 +- net/netcheck/netcheck.go | 5 ++++- net/netcheck/netcheck_test.go | 7 ++++--- tailcfg/derpmap.go | 28 ++++++++++++++++++++++++---- tailcfg/tailcfg.go | 3 ++- tailcfg/tailcfg_clone.go | 15 ++++++++------- tailcfg/tailcfg_view.go | 28 +++++++++++++++------------- 7 files changed, 58 insertions(+), 30 deletions(-) diff --git a/net/captivedetection/endpoints.go b/net/captivedetection/endpoints.go index 450ed4a1c..57b3e5335 100644 --- a/net/captivedetection/endpoints.go +++ b/net/captivedetection/endpoints.go @@ -89,7 +89,7 @@ func availableEndpoints(derpMap *tailcfg.DERPMap, preferredDERPRegionID int, log // Use the DERP IPs as captive portal detection endpoints. 
Using IPs is better than hostnames // because they do not depend on DNS resolution. for _, region := range derpMap.Regions { - if region.Avoid { + if region.Avoid || region.NoMeasureNoHome { continue } for _, node := range region.Nodes { diff --git a/net/netcheck/netcheck.go b/net/netcheck/netcheck.go index 107573e5d..a33ca2209 100644 --- a/net/netcheck/netcheck.go +++ b/net/netcheck/netcheck.go @@ -387,6 +387,9 @@ type probePlan map[string][]probe func sortRegions(dm *tailcfg.DERPMap, last *Report, preferredDERP int) (prev []*tailcfg.DERPRegion) { prev = make([]*tailcfg.DERPRegion, 0, len(dm.Regions)) for _, reg := range dm.Regions { + if reg.NoMeasureNoHome { + continue + } // include an otherwise avoid region if it is the current preferred region if reg.Avoid && reg.RegionID != preferredDERP { continue @@ -533,7 +536,7 @@ func makeProbePlanInitial(dm *tailcfg.DERPMap, ifState *netmon.State) (plan prob plan = make(probePlan) for _, reg := range dm.Regions { - if len(reg.Nodes) == 0 { + if reg.NoMeasureNoHome || len(reg.Nodes) == 0 { continue } diff --git a/net/netcheck/netcheck_test.go b/net/netcheck/netcheck_test.go index 88c19623d..3affa614d 100644 --- a/net/netcheck/netcheck_test.go +++ b/net/netcheck/netcheck_test.go @@ -455,7 +455,7 @@ func TestMakeProbePlan(t *testing.T) { basicMap := &tailcfg.DERPMap{ Regions: map[int]*tailcfg.DERPRegion{}, } - for rid := 1; rid <= 5; rid++ { + for rid := 1; rid <= 6; rid++ { var nodes []*tailcfg.DERPNode for nid := 0; nid < rid; nid++ { nodes = append(nodes, &tailcfg.DERPNode{ @@ -467,8 +467,9 @@ func TestMakeProbePlan(t *testing.T) { }) } basicMap.Regions[rid] = &tailcfg.DERPRegion{ - RegionID: rid, - Nodes: nodes, + RegionID: rid, + Nodes: nodes, + NoMeasureNoHome: rid == 6, } } diff --git a/tailcfg/derpmap.go b/tailcfg/derpmap.go index b3e54983f..e05559f3e 100644 --- a/tailcfg/derpmap.go +++ b/tailcfg/derpmap.go @@ -96,12 +96,32 @@ type DERPRegion struct { Latitude float64 `json:",omitempty"` Longitude float64 
`json:",omitempty"` - // Avoid is whether the client should avoid picking this as its home - // region. The region should only be used if a peer is there. - // Clients already using this region as their home should migrate - // away to a new region without Avoid set. + // Avoid is whether the client should avoid picking this as its home region. + // The region should only be used if a peer is there. Clients already using + // this region as their home should migrate away to a new region without + // Avoid set. + // + // Deprecated: because of bugs in past implementations combined with unclear + // docs that caused people to think the bugs were intentional, this field is + // deprecated. It was never supposed to cause STUN/DERP measurement probes, + // but due to bugs, it sometimes did. And then some parts of the code began + // to rely on that property. But then we were unable to use this field for + // its original purpose, nor its later imagined purpose, because various + // parts of the codebase thought it meant one thing and others thought it + // meant another. But it did something in the middle instead. So we're retiring + // it. Use NoMeasureNoHome instead. Avoid bool `json:",omitempty"` + // NoMeasureNoHome says that this regions should not be measured for its + // latency distance (STUN, HTTPS, etc) or availability (e.g. captive portal + // checks) and should never be selected as the node's home region. However, + // if a peer declares this region as its home, then this client is allowed + // to connect to it for the purpose of communicating with that peer. + // + // This is what the now deprecated Avoid bool was supposed to mean + // originally but had implementation bugs and documentation omissions. + NoMeasureNoHome bool `json:",omitempty"` + // Nodes are the DERP nodes running in this region, in // priority order for the current client. 
Client TLS // connections should ideally only go to the first entry diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index b5f49c614..7556ba3d0 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -159,7 +159,8 @@ type CapabilityVersion int // - 112: 2025-01-14: Client interprets AllowedIPs of nil as meaning same as Addresses // - 113: 2025-01-20: Client communicates to control whether funnel is enabled by sending Hostinfo.IngressEnabled (#14688) // - 114: 2025-01-30: NodeAttrMaxKeyDuration CapMap defined, clients might use it (no tailscaled code change) (#14829) -const CurrentCapabilityVersion CapabilityVersion = 114 +// - 115: 2025-03-07: Client understands DERPRegion.NoMeasureNoHome. +const CurrentCapabilityVersion CapabilityVersion = 115 // ID is an integer ID for a user, node, or login allocated by the // control plane. diff --git a/tailcfg/tailcfg_clone.go b/tailcfg/tailcfg_clone.go index aeeacebec..da1f4f374 100644 --- a/tailcfg/tailcfg_clone.go +++ b/tailcfg/tailcfg_clone.go @@ -416,13 +416,14 @@ func (src *DERPRegion) Clone() *DERPRegion { // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _DERPRegionCloneNeedsRegeneration = DERPRegion(struct { - RegionID int - RegionCode string - RegionName string - Latitude float64 - Longitude float64 - Avoid bool - Nodes []*DERPNode + RegionID int + RegionCode string + RegionName string + Latitude float64 + Longitude float64 + Avoid bool + NoMeasureNoHome bool + Nodes []*DERPNode }{}) // Clone makes a deep copy of DERPMap. 
diff --git a/tailcfg/tailcfg_view.go b/tailcfg/tailcfg_view.go index 4b56b8c09..b1aacab23 100644 --- a/tailcfg/tailcfg_view.go +++ b/tailcfg/tailcfg_view.go @@ -880,25 +880,27 @@ func (v *DERPRegionView) UnmarshalJSON(b []byte) error { return nil } -func (v DERPRegionView) RegionID() int { return v.ж.RegionID } -func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } -func (v DERPRegionView) RegionName() string { return v.ж.RegionName } -func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } -func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } -func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } +func (v DERPRegionView) RegionID() int { return v.ж.RegionID } +func (v DERPRegionView) RegionCode() string { return v.ж.RegionCode } +func (v DERPRegionView) RegionName() string { return v.ж.RegionName } +func (v DERPRegionView) Latitude() float64 { return v.ж.Latitude } +func (v DERPRegionView) Longitude() float64 { return v.ж.Longitude } +func (v DERPRegionView) Avoid() bool { return v.ж.Avoid } +func (v DERPRegionView) NoMeasureNoHome() bool { return v.ж.NoMeasureNoHome } func (v DERPRegionView) Nodes() views.SliceView[*DERPNode, DERPNodeView] { return views.SliceOfViews[*DERPNode, DERPNodeView](v.ж.Nodes) } // A compilation failure here means this code must be regenerated, with the command at the top of this file. var _DERPRegionViewNeedsRegeneration = DERPRegion(struct { - RegionID int - RegionCode string - RegionName string - Latitude float64 - Longitude float64 - Avoid bool - Nodes []*DERPNode + RegionID int + RegionCode string + RegionName string + Latitude float64 + Longitude float64 + Avoid bool + NoMeasureNoHome bool + Nodes []*DERPNode }{}) // View returns a read-only view of DERPMap. 
From f67725c3ff9e4fd66914619d7becd172958bc424 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 12:41:30 -0600 Subject: [PATCH 50/87] .github: Bump peter-evans/create-pull-request from 7.0.6 to 7.0.7 (#15113) Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 7.0.6 to 7.0.7. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/67ccf781d68cd99b580ae25a5c18a1cc84ffff1f...dd2324fc52d5d43c699a5636bcf19fceaa70c284) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 4d9db490b..84b10e254 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -36,7 +36,7 @@ jobs: private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f #v7.0.6 + uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index f2d1e65a5..18d7ffdd9 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -35,7 +35,7 @@ jobs: - name: Send pull request id: pull-request - uses: 
peter-evans/create-pull-request@67ccf781d68cd99b580ae25a5c18a1cc84ffff1f #v7.0.6 + uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 5827e20fdf93c64ae15ef91d7936b18f2122889a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 12:42:13 -0600 Subject: [PATCH 51/87] .github: Bump github/codeql-action from 3.28.9 to 3.28.10 (#15110) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.9 to 3.28.10. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0...b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a241d3578..318bc6698 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 + uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 + uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@9e8d0789d4a0fa9ceb6b1738f7e269594bdd67f0 # v3.28.9 + uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 From 71b1ae6bef921abc38ab13d70d7e30bd2170bde3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 13:02:04 -0600 Subject: [PATCH 52/87] .github: Bump actions/upload-artifact from 4.6.0 to 4.6.1 (#15111) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.0 to 4.6.1. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08...4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 87b8959ba..b52a3af36 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -485,7 +485,7 @@ jobs: run: | echo "artifacts_path=$(realpath .)" >> $GITHUB_ENV - name: upload crash - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1 if: steps.run.outcome != 'success' && steps.build.outcome == 'success' with: name: artifacts From b9f4c5d2466f0a1196ad99fb3620d7095d8311cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 9 Mar 2025 13:31:02 -0600 Subject: [PATCH 53/87] .github: Bump golangci/golangci-lint-action from 6.3.1 to 6.5.0 (#15046) Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.3.1 to 6.5.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/2e788936b09dd82dc280e845628a40d2ba6b204c...2226d7cb06a077cd73e56eedd38eecad18e5d837) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: Mario Minardi Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/golangci-lint.yml | 2 +- .golangci.yml | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 3ee6287b9..5318923d8 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -31,7 +31,7 @@ jobs: cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@2e788936b09dd82dc280e845628a40d2ba6b204c # v6.3.1 + uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: version: v1.64 diff --git a/.golangci.yml b/.golangci.yml index 45248de16..15f8b5d83 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -26,16 +26,11 @@ issues: # Per-linter settings are contained in this top-level key linters-settings: - # Enable all rules by default; we don't use invisible unicode runes. - bidichk: - gofmt: rewrite-rules: - pattern: 'interface{}' replacement: 'any' - goimports: - govet: # Matches what we use in corp as of 2023-12-07 enable: @@ -78,8 +73,6 @@ linters-settings: # analyzer doesn't support type declarations #- github.com/tailscale/tailscale/types/logger.Logf - misspell: - revive: enable-all-rules: false ignore-generated-header: true From 69b27d2fcfeaa745de072f96dd6c30f4f085ecd9 Mon Sep 17 00:00:00 2001 From: James Tucker Date: Fri, 7 Mar 2025 14:27:13 -0700 Subject: [PATCH 54/87] cmd/natc: error and log when IP range is exhausted natc itself can't immediately fix the problem, but it can more correctly error rather than return bad addresses.
Updates tailscale/corp#26968 Signed-off-by: James Tucker --- cmd/natc/natc.go | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/cmd/natc/natc.go b/cmd/natc/natc.go index 956d2455e..73ba116ff 100644 --- a/cmd/natc/natc.go +++ b/cmd/natc/natc.go @@ -41,6 +41,8 @@ import ( "tailscale.com/wgengine/netstack" ) +var ErrNoIPsAvailable = errors.New("no IPs available") + func main() { hostinfo.SetApp("natc") if !envknob.UseWIPCode() { @@ -277,14 +279,14 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP defer cancel() who, err := c.lc.WhoIs(ctx, remoteAddr.String()) if err != nil { - log.Printf("HandleDNS: WhoIs failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): WhoIs failed: %v\n", remoteAddr.String(), err) return } var msg dnsmessage.Message err = msg.Unpack(buf) if err != nil { - log.Printf("HandleDNS: dnsmessage unpack failed: %v\n ", err) + log.Printf("HandleDNS(remote=%s): dnsmessage unpack failed: %v\n", remoteAddr.String(), err) return } @@ -297,19 +299,19 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP case dnsmessage.TypeAAAA, dnsmessage.TypeA: dstAddrs, err := lookupDestinationIP(q.Name.String()) if err != nil { - log.Printf("HandleDNS: lookup destination failed: %v\n ", err) + log.Printf("HandleDNS(remote=%s): lookup destination failed: %v\n", remoteAddr.String(), err) return } if c.ignoreDestination(dstAddrs) { bs, err := dnsResponse(&msg, dstAddrs) // TODO (fran): treat as SERVFAIL if err != nil { - log.Printf("HandleDNS: generate ignore response failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): generate ignore response failed: %v\n", remoteAddr.String(), err) return } _, err = pc.WriteTo(bs, remoteAddr) if err != nil { - log.Printf("HandleDNS: write failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) } return } @@ -322,7 +324,7 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, 
remoteAddr *net.UDP resp, err := c.generateDNSResponse(&msg, who.Node.ID) // TODO (fran): treat as SERVFAIL if err != nil { - log.Printf("HandleDNS: connector handling failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): connector handling failed: %v\n", remoteAddr.String(), err) return } // TODO (fran): treat as NXDOMAIN @@ -332,7 +334,7 @@ func (c *connector) handleDNS(pc net.PacketConn, buf []byte, remoteAddr *net.UDP // This connector handled the DNS request _, err = pc.WriteTo(resp, remoteAddr) if err != nil { - log.Printf("HandleDNS: write failed: %v\n", err) + log.Printf("HandleDNS(remote=%s): write failed: %v\n", remoteAddr.String(), err) } } @@ -529,6 +531,9 @@ func (ps *perPeerState) ipForDomain(domain string) ([]netip.Addr, error) { return addrs, nil } addrs := ps.assignAddrsLocked(domain) + if addrs == nil { + return nil, ErrNoIPsAvailable + } return addrs, nil } @@ -575,6 +580,9 @@ func (ps *perPeerState) assignAddrsLocked(domain string) []netip.Addr { ps.addrToDomain = &bart.Table[string]{} } v4 := ps.unusedIPv4Locked() + if !v4.IsValid() { + return nil + } as16 := ps.c.v6ULA.Addr().As16() as4 := v4.As4() copy(as16[12:], as4[:]) From e38e5c38cc55c7a2ba90429e7ce195e7ac7ec665 Mon Sep 17 00:00:00 2001 From: Brad Fitzpatrick Date: Mon, 10 Mar 2025 13:03:10 -0700 Subject: [PATCH 55/87] ssh/tailssh: fix typo in forwardedEnviron method, add docs And don't return a comma-separated string. That's kinda weird signature-wise, and not needed by half the callers anyway. The callers that care can do the join themselves. 
Updates #cleanup Change-Id: Ib5ad51a3c6b663d868eba14fe9dc54b2609cfb0d Signed-off-by: Brad Fitzpatrick --- ssh/tailssh/incubator.go | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/ssh/tailssh/incubator.go b/ssh/tailssh/incubator.go index e809e9185..4f630186d 100644 --- a/ssh/tailssh/incubator.go +++ b/ssh/tailssh/incubator.go @@ -254,32 +254,44 @@ func parseIncubatorArgs(args []string) (incubatorArgs, error) { return ia, nil } -func (ia incubatorArgs) forwadedEnviron() ([]string, string, error) { +// forwardedEnviron returns the concatenation of the current environment with +// any environment variables specified in ia.encodedEnv. +// +// It also returns allowedExtraKeys, containing the env keys that were passed in +// to ia.encodedEnv. +func (ia incubatorArgs) forwardedEnviron() (env, allowedExtraKeys []string, err error) { environ := os.Environ() + // pass through SSH_AUTH_SOCK environment variable to support ssh agent forwarding - allowListKeys := "SSH_AUTH_SOCK" + // TODO(bradfitz,percy): why is this listed specially? If the parent wanted to included + // it, couldn't it have just passed it to the incubator in encodedEnv? + // If it didn't, no reason for us to pass it to "su -w ..." if it's not in our env + // anyway? 
(Surely we don't want to inherit the tailscaled parent SSH_AUTH_SOCK, if any) + allowedExtraKeys = []string{"SSH_AUTH_SOCK"} if ia.encodedEnv != "" { unquoted, err := strconv.Unquote(ia.encodedEnv) if err != nil { - return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + return nil, nil, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) } var extraEnviron []string err = json.Unmarshal([]byte(unquoted), &extraEnviron) if err != nil { - return nil, "", fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) + return nil, nil, fmt.Errorf("unable to parse encodedEnv %q: %w", ia.encodedEnv, err) } environ = append(environ, extraEnviron...) - for _, v := range extraEnviron { - allowListKeys = fmt.Sprintf("%s,%s", allowListKeys, strings.Split(v, "=")[0]) + for _, kv := range extraEnviron { + if k, _, ok := strings.Cut(kv, "="); ok { + allowedExtraKeys = append(allowedExtraKeys, k) + } } } - return environ, allowListKeys, nil + return environ, allowedExtraKeys, nil } // beIncubator is the entrypoint to the `tailscaled be-child ssh` subcommand. 
@@ -459,7 +471,7 @@ func tryExecLogin(dlogf logger.Logf, ia incubatorArgs) error { loginArgs := ia.loginArgs(loginCmdPath) dlogf("logging in with %+v", loginArgs) - environ, _, err := ia.forwadedEnviron() + environ, _, err := ia.forwardedEnviron() if err != nil { return err } @@ -498,14 +510,14 @@ func trySU(dlogf logger.Logf, ia incubatorArgs) (handled bool, err error) { defer sessionCloser() } - environ, allowListEnvKeys, err := ia.forwadedEnviron() + environ, allowListEnvKeys, err := ia.forwardedEnviron() if err != nil { return false, err } loginArgs := []string{ su, - "-w", allowListEnvKeys, + "-w", strings.Join(allowListEnvKeys, ","), "-l", ia.localUser, } @@ -546,7 +558,7 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { return "" } - _, allowListEnvKeys, err := ia.forwadedEnviron() + _, allowListEnvKeys, err := ia.forwardedEnviron() if err != nil { return "" } @@ -555,7 +567,7 @@ func findSU(dlogf logger.Logf, ia incubatorArgs) string { // to make sure su supports the necessary arguments. err = exec.Command( su, - "-w", allowListEnvKeys, + "-w", strings.Join(allowListEnvKeys, ","), "-l", ia.localUser, "-c", "true", @@ -582,7 +594,7 @@ func handleSSHInProcess(dlogf logger.Logf, ia incubatorArgs) error { return err } - environ, _, err := ia.forwadedEnviron() + environ, _, err := ia.forwardedEnviron() if err != nil { return err } From a6e19f2881c758eae518ce94e6e0b905ab8ccee0 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 11 Mar 2025 07:09:46 -0700 Subject: [PATCH 56/87] ipn/ipnlocal: allow cache hits for testing ACME certs (#15023) PR #14771 added support for getting certs from alternate ACME servers, but the certStore caching mechanism breaks unless you install the CA in system roots, because we check the validity of the cert before allowing a cache hit, which includes checking for a valid chain back to a trusted CA. For ease of testing, allow cert cache hits when the chain is unknown to avoid re-issuing the cert on every TLS request served. 
We will still get a cache miss when the cert has expired, as enforced by a test, and this makes it much easier to test against non-prod ACME servers compared to having to manage the installation of non-prod CAs on clients. Updates #14771 Change-Id: I74fe6593fe399bd135cc822195155e99985ec08a Signed-off-by: Tom Proctor --- ipn/ipnlocal/cert.go | 27 ++++++++++++++++++++++++++- ipn/ipnlocal/cert_test.go | 24 +++++++++++++++++------- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index d360ed79c..4c026a9e7 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -471,6 +471,10 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger return nil, err } + if !isDefaultDirectoryURL(ac.DirectoryURL) { + logf("acme: using Directory URL %q", ac.DirectoryURL) + } + a, err := ac.GetReg(ctx, "" /* pre-RFC param */) switch { case err == nil: @@ -737,7 +741,28 @@ func validateLeaf(leaf *x509.Certificate, intermediates *x509.CertPool, domain s // binary's baked-in roots (LetsEncrypt). See tailscale/tailscale#14690. return validateLeaf(leaf, intermediates, domain, now, bakedroots.Get()) } - return err == nil + + if err == nil { + return true + } + + // When pointed at a non-prod ACME server, we don't expect to have the CA + // in our system or baked-in roots. Verify only throws UnknownAuthorityError + // after first checking the leaf cert's expiry, hostnames etc, so we know + // that the only reason for an error is to do with constructing a full chain. + // Allow this error so that cert caching still works in testing environments. 
+ if errors.As(err, &x509.UnknownAuthorityError{}) { + acmeURL := envknob.String("TS_DEBUG_ACME_DIRECTORY_URL") + if !isDefaultDirectoryURL(acmeURL) { + return true + } + } + + return false +} + +func isDefaultDirectoryURL(u string) bool { + return u == "" || u == acme.LetsEncryptURL } // validLookingCertDomain reports whether name looks like a valid domain name that diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index 868808cd6..c77570e87 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -47,10 +47,10 @@ var certTestFS embed.FS func TestCertStoreRoundTrip(t *testing.T) { const testDomain = "example.com" - // Use a fixed verification timestamp so validity doesn't fall off when the - // cert expires. If you update the test data below, this may also need to be - // updated. + // Use fixed verification timestamps so validity doesn't change over time. + // If you update the test data below, these may also need to be updated. testNow := time.Date(2023, time.February, 10, 0, 0, 0, 0, time.UTC) + testExpired := time.Date(2026, time.February, 10, 0, 0, 0, 0, time.UTC) // To re-generate a root certificate and domain certificate for testing, // use: @@ -78,14 +78,20 @@ func TestCertStoreRoundTrip(t *testing.T) { } tests := []struct { - name string - store certStore + name string + store certStore + debugACMEURL bool }{ - {"FileStore", certFileStore{dir: t.TempDir(), testRoots: roots}}, - {"StateStore", certStateStore{StateStore: new(mem.Store), testRoots: roots}}, + {"FileStore", certFileStore{dir: t.TempDir(), testRoots: roots}, false}, + {"FileStore_UnknownCA", certFileStore{dir: t.TempDir()}, true}, + {"StateStore", certStateStore{StateStore: new(mem.Store), testRoots: roots}, false}, + {"StateStore_UnknownCA", certStateStore{StateStore: new(mem.Store)}, true}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + if test.debugACMEURL { + t.Setenv("TS_DEBUG_ACME_DIRECTORY_URL", 
"https://acme-staging-v02.api.letsencrypt.org/directory") + } if err := test.store.WriteTLSCertAndKey(testDomain, testCert, testKey); err != nil { t.Fatalf("WriteTLSCertAndKey: unexpected error: %v", err) } @@ -99,6 +105,10 @@ func TestCertStoreRoundTrip(t *testing.T) { if diff := cmp.Diff(kp.KeyPEM, testKey); diff != "" { t.Errorf("Key (-got, +want):\n%s", diff) } + unexpected, err := test.store.Read(testDomain, testExpired) + if err != errCertExpired { + t.Fatalf("Read: expected expiry error: %v", string(unexpected.CertPEM)) + } }) } } From 660b0515b9e37594aac049576660f3d7ceafcce2 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Tue, 11 Mar 2025 13:24:11 -0400 Subject: [PATCH 57/87] safesocket, version: fix safesocket_darwin behavior for cmd/tailscale (#15275) fixes tailscale/tailscale#15269 Fixes the various CLIs for all of the various flavors of tailscaled on darwin. The logic in version is updated so that we have methods that return true only for the actual GUI app (which can be the CLI) and the order of the checks in localTCPPortAndTokenDarwin are corrected so that the logic works with all 5 combinations of CLI and tailscaled.
Signed-off-by: Jonathan Nobels --- safesocket/safesocket_darwin.go | 42 ++++++++++------- safesocket/safesocket_darwin_test.go | 68 ++++++++++++++++++++++------ version/prop.go | 42 ++++++++++------- 3 files changed, 104 insertions(+), 48 deletions(-) diff --git a/safesocket/safesocket_darwin.go b/safesocket/safesocket_darwin.go index fb35ad9df..e2b3ea458 100644 --- a/safesocket/safesocket_darwin.go +++ b/safesocket/safesocket_darwin.go @@ -34,17 +34,17 @@ type safesocketDarwin struct { mu sync.Mutex token string // safesocket auth token port int // safesocket port - sameuserproofFD *os.File // file descriptor for macos app store sameuserproof file - sharedDir string // shared directory for location of sameuserproof file + sameuserproofFD *os.File // File descriptor for macos app store sameuserproof file + sharedDir string // Shared directory for location of sameuserproof file - checkConn bool // Check macsys safesocket port before returning it - isMacSysExt func() bool // For testing only to force macsys - isMacGUIApp func() bool // For testing only to force macOS sandbox + checkConn bool // If true, check macsys safesocket port before returning it + isMacSysExt func() bool // Reports true if this binary is the macOS System Extension + isMacGUIApp func() bool // Reports true if running as a macOS GUI app (Tailscale.app) } var ssd = safesocketDarwin{ isMacSysExt: version.IsMacSysExt, - isMacGUIApp: func() bool { return version.IsMacAppStore() || version.IsMacSysApp() || version.IsMacSysExt() }, + isMacGUIApp: func() bool { return version.IsMacAppStoreGUI() || version.IsMacSysGUI() }, checkConn: true, sharedDir: "/Library/Tailscale", } @@ -63,22 +63,25 @@ var ssd = safesocketDarwin{ // calls InitListenerDarwin. // localTCPPortAndTokenDarwin returns the localhost TCP port number and auth token -// either generated, or sourced from the NEPacketTunnelProvider managed tailscaled process. 
+// either from the sameuserproof mechanism, or source and set directly from the +// NEPacketTunnelProvider managed tailscaled process when the CLI is invoked +// from the Tailscale.app GUI. func localTCPPortAndTokenDarwin() (port int, token string, err error) { ssd.mu.Lock() defer ssd.mu.Unlock() - if !ssd.isMacGUIApp() { - return 0, "", ErrNoTokenOnOS - } - - if ssd.port != 0 && ssd.token != "" { + switch { + case ssd.port != 0 && ssd.token != "": + // If something has explicitly set our credentials (typically non-standalone macos binary), use them. return ssd.port, ssd.token, nil + case !ssd.isMacGUIApp(): + // We're not a GUI app (probably cmd/tailscale), so try falling back to sameuserproof. + // If portAndTokenFromSameUserProof returns an error here, cmd/tailscale will + // attempt to use the default unix socket mechanism supported by tailscaled. + return portAndTokenFromSameUserProof() + default: + return 0, "", ErrTokenNotFound } - - // Credentials were not explicitly, this is likely a standalone CLI binary. - // Fallback to reading the sameuserproof file. - return portAndTokenFromSameUserProof() } // SetCredentials sets an token and port used to authenticate safesocket generated @@ -341,6 +344,11 @@ func readMacosSameUserProof() (port int, token string, err error) { } func portAndTokenFromSameUserProof() (port int, token string, err error) { + // When we're cmd/tailscale, we have no idea what tailscaled is, so we'll try + // macos, then macsys and finally, fallback to tailscaled via a unix socket + // if both of those return an error. You can run macos or macsys and + // tailscaled at the same time, but we are forced to choose one and the GUI + // clients are first in line here. You cannot run macos and macsys simultaneously. 
if port, token, err := readMacosSameUserProof(); err == nil { return port, token, nil } @@ -349,5 +357,5 @@ func portAndTokenFromSameUserProof() (port int, token string, err error) { return port, token, nil } - return 0, "", err + return 0, "", ErrTokenNotFound } diff --git a/safesocket/safesocket_darwin_test.go b/safesocket/safesocket_darwin_test.go index 2793d6aa3..e52959ad5 100644 --- a/safesocket/safesocket_darwin_test.go +++ b/safesocket/safesocket_darwin_test.go @@ -15,9 +15,12 @@ import ( // sets the port and token correctly and that LocalTCPPortAndToken // returns the given values. func TestSetCredentials(t *testing.T) { - wantPort := 123 - wantToken := "token" - tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) + const ( + wantToken = "token" + wantPort = 123 + ) + + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return false }) SetCredentials(wantToken, wantPort) gotPort, gotToken, err := LocalTCPPortAndToken() @@ -26,11 +29,47 @@ func TestSetCredentials(t *testing.T) { } if gotPort != wantPort { - t.Errorf("got port %d, want %d", gotPort, wantPort) + t.Errorf("port: got %d, want %d", gotPort, wantPort) } if gotToken != wantToken { - t.Errorf("got token %s, want %s", gotToken, wantToken) + t.Errorf("token: got %s, want %s", gotToken, wantToken) + } +} + +// TestFallbackToSameuserproof verifies that we fallback to the +// sameuserproof file via LocalTCPPortAndToken when we're running +// +// s cmd/tailscale +func TestFallbackToSameuserproof(t *testing.T) { + dir := t.TempDir() + const ( + wantToken = "token" + wantPort = 123 + ) + + // Mimics cmd/tailscale falling back to sameuserproof + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return false }) + tstest.Replace(t, &ssd.sharedDir, dir) + tstest.Replace(t, &ssd.checkConn, false) + + // Behave as macSysExt when initializing sameuserproof + tstest.Replace(t, &ssd.isMacSysExt, func() bool { return true }) + if err := initSameUserProofToken(dir, wantPort, wantToken); err != nil { + 
t.Fatalf("initSameUserProofToken: %v", err) + } + + gotPort, gotToken, err := LocalTCPPortAndToken() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if gotPort != wantPort { + t.Errorf("port: got %d, want %d", gotPort, wantPort) + } + + if gotToken != wantToken { + t.Errorf("token: got %s, want %s", gotToken, wantToken) } } @@ -38,7 +77,7 @@ func TestSetCredentials(t *testing.T) { // returns a listener and a non-zero port and non-empty token. func TestInitListenerDarwin(t *testing.T) { temp := t.TempDir() - tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return true }) + tstest.Replace(t, &ssd.isMacGUIApp, func() bool { return false }) ln, err := InitListenerDarwin(temp) if err != nil || ln == nil { @@ -52,15 +91,14 @@ func TestInitListenerDarwin(t *testing.T) { } if port == 0 { - t.Errorf("expected non-zero port, got %d", port) + t.Errorf("port: got %d, want non-zero", port) } if token == "" { - t.Errorf("expected non-empty token, got empty string") + t.Errorf("token: got %s, want non-empty", token) } } -// TestTokenGeneration verifies token generation behavior func TestTokenGeneration(t *testing.T) { token, err := getToken() if err != nil { @@ -70,7 +108,7 @@ func TestTokenGeneration(t *testing.T) { // Verify token length (hex string is 2x byte length) wantLen := sameUserProofTokenLength * 2 if got := len(token); got != wantLen { - t.Errorf("token length = %d, want %d", got, wantLen) + t.Errorf("token length: got %d, want %d", got, wantLen) } // Verify token persistence @@ -79,7 +117,7 @@ func TestTokenGeneration(t *testing.T) { t.Fatalf("subsequent getToken: %v", err) } if subsequentToken != token { - t.Errorf("subsequent token = %q, want %q", subsequentToken, token) + t.Errorf("subsequent token: got %q, want %q", subsequentToken, token) } } @@ -107,10 +145,10 @@ func TestMacsysSameuserproof(t *testing.T) { } if gotPort != wantPort { - t.Errorf("got port = %d, want %d", gotPort, wantPort) + t.Errorf("port: got %d, want %d", gotPort, wantPort) 
} if wantToken != gotToken { - t.Errorf("got token = %s, want %s", wantToken, gotToken) + t.Errorf("token: got %s, want %s", wantToken, gotToken) } assertFileCount(t, dir, 1, "sameuserproof-") } @@ -138,7 +176,7 @@ func assertFileCount(t *testing.T, dir string, want int, prefix string) { files, err := os.ReadDir(dir) if err != nil { - t.Fatalf("unexpected error: %v", err) + t.Fatalf("[unexpected] error: %v", err) } count := 0 for _, file := range files { @@ -147,6 +185,6 @@ func assertFileCount(t *testing.T, dir string, want int, prefix string) { } } if count != want { - t.Errorf("expected 1 file, got %d", count) + t.Errorf("files: got %d, want 1", count) } } diff --git a/version/prop.go b/version/prop.go index 6026d1179..9327e6fe6 100644 --- a/version/prop.go +++ b/version/prop.go @@ -62,26 +62,21 @@ func IsSandboxedMacOS() bool { // Tailscale for macOS, either the main GUI process (non-sandboxed) or the // system extension (sandboxed). func IsMacSys() bool { - return IsMacSysExt() || IsMacSysApp() + return IsMacSysExt() || IsMacSysGUI() } var isMacSysApp lazy.SyncValue[bool] -// IsMacSysApp reports whether this process is the main, non-sandboxed GUI process +// IsMacSysGUI reports whether this process is the main, non-sandboxed GUI process // that ships with the Standalone variant of Tailscale for macOS. -func IsMacSysApp() bool { +func IsMacSysGUI() bool { if runtime.GOOS != "darwin" { return false } return isMacSysApp.Get(func() bool { - exe, err := os.Executable() - if err != nil { - return false - } - // Check that this is the GUI binary, and it is not sandboxed. The GUI binary - // shipped in the App Store will always have the App Sandbox enabled. 
- return strings.HasSuffix(exe, "/Contents/MacOS/Tailscale") && !IsMacAppStore() + return strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macsys/") || + strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macsys") }) } @@ -95,10 +90,6 @@ func IsMacSysExt() bool { return false } return isMacSysExt.Get(func() bool { - if strings.Contains(os.Getenv("HOME"), "/Containers/io.tailscale.ipn.macsys/") || - strings.Contains(os.Getenv("XPC_SERVICE_NAME"), "io.tailscale.ipn.macsys") { - return true - } exe, err := os.Executable() if err != nil { return false @@ -109,8 +100,8 @@ func IsMacSysExt() bool { var isMacAppStore lazy.SyncValue[bool] -// IsMacAppStore whether this binary is from the App Store version of Tailscale -// for macOS. +// IsMacAppStore returns whether this binary is from the App Store version of Tailscale +// for macOS. Returns true for both the network extension and the GUI app. func IsMacAppStore() bool { if runtime.GOOS != "darwin" { return false @@ -124,6 +115,25 @@ func IsMacAppStore() bool { }) } +var isMacAppStoreGUI lazy.SyncValue[bool] + +// IsMacAppStoreGUI reports whether this binary is the GUI app from the App Store +// version of Tailscale for macOS. +func IsMacAppStoreGUI() bool { + if runtime.GOOS != "darwin" { + return false + } + return isMacAppStoreGUI.Get(func() bool { + exe, err := os.Executable() + if err != nil { + return false + } + // Check that this is the GUI binary, and it is not sandboxed. The GUI binary + // shipped in the App Store will always have the App Sandbox enabled. + return strings.Contains(exe, "/Tailscale") && !IsMacSysGUI() + }) +} + var isAppleTV lazy.SyncValue[bool] // IsAppleTV reports whether this binary is part of the Tailscale network extension for tvOS. 
From ce0d8b0fb9897d2481e08287e0a4de2afccb44ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 11:25:35 -0600 Subject: [PATCH 58/87] .github: Bump github/codeql-action from 3.28.10 to 3.28.11 (#15258) Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.10 to 3.28.11. - [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d...6bb031afdd8eb862ea3fc1848194185e076637e5) --- updated-dependencies: - dependency-name: github/codeql-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 318bc6698..f20719360 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -55,7 +55,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 + uses: github/codeql-action/init@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -66,7 +66,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
# If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 + uses: github/codeql-action/autobuild@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 # ℹ️ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl @@ -80,4 +80,4 @@ jobs: # make release - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3.28.10 + uses: github/codeql-action/analyze@6bb031afdd8eb862ea3fc1848194185e076637e5 # v3.28.11 From 03f7f1860ed4f39707688ade3e61d59ba3693d2d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 11:31:52 -0600 Subject: [PATCH 59/87] .github: Bump peter-evans/create-pull-request from 7.0.7 to 7.0.8 (#15257) Bumps [peter-evans/create-pull-request](https://github.com/peter-evans/create-pull-request) from 7.0.7 to 7.0.8. - [Release notes](https://github.com/peter-evans/create-pull-request/releases) - [Commits](https://github.com/peter-evans/create-pull-request/compare/dd2324fc52d5d43c699a5636bcf19fceaa70c284...271a8d0340265f705b14b6d32b9829c1cb33d45e) --- updated-dependencies: - dependency-name: peter-evans/create-pull-request dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/update-flake.yml | 2 +- .github/workflows/update-webclient-prebuilt.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/update-flake.yml b/.github/workflows/update-flake.yml index 84b10e254..f695c578e 100644 --- a/.github/workflows/update-flake.yml +++ b/.github/workflows/update-flake.yml @@ -36,7 +36,7 @@ jobs: private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }} - name: Send pull request - uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 with: token: ${{ steps.generate-token.outputs.token }} author: Flakes Updater diff --git a/.github/workflows/update-webclient-prebuilt.yml b/.github/workflows/update-webclient-prebuilt.yml index 18d7ffdd9..412836db7 100644 --- a/.github/workflows/update-webclient-prebuilt.yml +++ b/.github/workflows/update-webclient-prebuilt.yml @@ -35,7 +35,7 @@ jobs: - name: Send pull request id: pull-request - uses: peter-evans/create-pull-request@dd2324fc52d5d43c699a5636bcf19fceaa70c284 #v7.0.7 + uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e #v7.0.8 with: token: ${{ steps.generate-token.outputs.token }} author: OSS Updater From 8f0080c7a48ccf482eeebe7d5c4a9d80da1dba02 Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Tue, 11 Mar 2025 13:10:22 -0700 Subject: [PATCH 60/87] cmd/tsidp: allow CORS requests to openid-configuration (#15229) Add support for Cross-Origin XHR requests to the openid-configuration endpoint to enable clients like Grafana's auto-population of OIDC setup data from its contents. 
Updates https://github.com/tailscale/tailscale/issues/10263 Signed-off-by: Patrick O'Doherty --- cmd/tsidp/tsidp.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/cmd/tsidp/tsidp.go b/cmd/tsidp/tsidp.go index 96fac58fd..95ab2b2eb 100644 --- a/cmd/tsidp/tsidp.go +++ b/cmd/tsidp/tsidp.go @@ -765,6 +765,18 @@ var ( ) func (s *idpServer) serveOpenIDConfig(w http.ResponseWriter, r *http.Request) { + h := w.Header() + h.Set("Access-Control-Allow-Origin", "*") + h.Set("Access-Control-Allow-Method", "GET, OPTIONS") + // allow all to prevent errors from client sending their own bespoke headers + // and having the server reject the request. + h.Set("Access-Control-Allow-Headers", "*") + + // early return for pre-flight OPTIONS requests. + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + return + } if r.URL.Path != oidcConfigPath { http.Error(w, "tsidp: not found", http.StatusNotFound) return From 5ebc135397acbc2a217986b95f693e6a2c211fd8 Mon Sep 17 00:00:00 2001 From: Fran Bull Date: Wed, 5 Mar 2025 10:25:30 -0800 Subject: [PATCH 61/87] tsnet,wgengine: fix src to primary Tailscale IP for TCP dials Ensure that the src address for a connection is one of the primary addresses assigned by Tailscale. Not, for example, a virtual IP address. 
Updates #14667 Signed-off-by: Fran Bull --- cmd/k8s-operator/depaware.txt | 1 + tsnet/tsnet.go | 9 ++++++-- wgengine/netstack/netstack.go | 43 +++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 54d9bd248..0a787a780 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -904,6 +904,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/tstime/rate from tailscale.com/derp+ tailscale.com/tsweb/varz from tailscale.com/util/usermetric tailscale.com/types/appctype from tailscale.com/ipn/ipnlocal + tailscale.com/types/bools from tailscale.com/tsnet tailscale.com/types/dnstype from tailscale.com/ipn/ipnlocal+ tailscale.com/types/empty from tailscale.com/ipn+ tailscale.com/types/ipproto from tailscale.com/net/flowtrack+ diff --git a/tsnet/tsnet.go b/tsnet/tsnet.go index 680825708..15cf39cba 100644 --- a/tsnet/tsnet.go +++ b/tsnet/tsnet.go @@ -49,6 +49,7 @@ import ( "tailscale.com/net/socks5" "tailscale.com/net/tsdial" "tailscale.com/tsd" + "tailscale.com/types/bools" "tailscale.com/types/logger" "tailscale.com/types/logid" "tailscale.com/types/nettype" @@ -601,7 +602,9 @@ func (s *Server) start() (reterr error) { // Note: don't just return ns.DialContextTCP or we'll return // *gonet.TCPConn(nil) instead of a nil interface which trips up // callers. - tcpConn, err := ns.DialContextTCP(ctx, dst) + v4, v6 := s.TailscaleIPs() + src := bools.IfElse(dst.Addr().Is6(), v6, v4) + tcpConn, err := ns.DialContextTCPWithBind(ctx, src, dst) if err != nil { return nil, err } @@ -611,7 +614,9 @@ func (s *Server) start() (reterr error) { // Note: don't just return ns.DialContextUDP or we'll return // *gonet.UDPConn(nil) instead of a nil interface which trips up // callers. 
- udpConn, err := ns.DialContextUDP(ctx, dst) + v4, v6 := s.TailscaleIPs() + src := bools.IfElse(dst.Addr().Is6(), v6, v4) + udpConn, err := ns.DialContextUDPWithBind(ctx, src, dst) if err != nil { return nil, err } diff --git a/wgengine/netstack/netstack.go b/wgengine/netstack/netstack.go index 0bbd20b79..591bedde4 100644 --- a/wgengine/netstack/netstack.go +++ b/wgengine/netstack/netstack.go @@ -843,6 +843,27 @@ func (ns *Impl) DialContextTCP(ctx context.Context, ipp netip.AddrPort) (*gonet. return gonet.DialContextTCP(ctx, ns.ipstack, remoteAddress, ipType) } +// DialContextTCPWithBind creates a new gonet.TCPConn connected to the specified +// remoteAddress with its local address bound to localAddr on an available port. +func (ns *Impl) DialContextTCPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.TCPConn, error) { + remoteAddress := tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()), + Port: remoteAddr.Port(), + } + localAddress := tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(localAddr.AsSlice()), + } + var ipType tcpip.NetworkProtocolNumber + if remoteAddr.Addr().Is4() { + ipType = ipv4.ProtocolNumber + } else { + ipType = ipv6.ProtocolNumber + } + return gonet.DialTCPWithBind(ctx, ns.ipstack, localAddress, remoteAddress, ipType) +} + func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet.UDPConn, error) { remoteAddress := &tcpip.FullAddress{ NIC: nicID, @@ -859,6 +880,28 @@ func (ns *Impl) DialContextUDP(ctx context.Context, ipp netip.AddrPort) (*gonet. return gonet.DialUDP(ns.ipstack, nil, remoteAddress, ipType) } +// DialContextUDPWithBind creates a new gonet.UDPConn. Connected to remoteAddr. +// With its local address bound to localAddr on an available port. 
+func (ns *Impl) DialContextUDPWithBind(ctx context.Context, localAddr netip.Addr, remoteAddr netip.AddrPort) (*gonet.UDPConn, error) { + remoteAddress := &tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(remoteAddr.Addr().AsSlice()), + Port: remoteAddr.Port(), + } + localAddress := &tcpip.FullAddress{ + NIC: nicID, + Addr: tcpip.AddrFromSlice(localAddr.AsSlice()), + } + var ipType tcpip.NetworkProtocolNumber + if remoteAddr.Addr().Is4() { + ipType = ipv4.ProtocolNumber + } else { + ipType = ipv6.ProtocolNumber + } + + return gonet.DialUDP(ns.ipstack, localAddress, remoteAddress, ipType) +} + // getInjectInboundBuffsSizes returns packet memory and a sizes slice for usage // when calling tstun.Wrapper.InjectInboundPacketBuffer(). These are sized with // consideration for MTU and GSO support on ns.linkEP. They should be recycled From 06ae52d309843429df69bc903c318c448abc44d8 Mon Sep 17 00:00:00 2001 From: Naman Sood Date: Tue, 11 Mar 2025 17:23:21 -0400 Subject: [PATCH 62/87] words: append to the tail of the wordlists (#15278) Updates tailscale/corp#14698 Signed-off-by: Naman Sood --- words/scales.txt | 41 +++++++++++++++++++++++++++++++++++++++++ words/tails.txt | 26 ++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/words/scales.txt b/words/scales.txt index 2fe849bb9..fb19cb88d 100644 --- a/words/scales.txt +++ b/words/scales.txt @@ -399,3 +399,44 @@ rankine piano ruler scoville +oratrice +teeth +cliff +degree +company +economy +court +justitia +themis +carat +carob +karat +barley +corn +penny +pound +mark +pence +mine +stairs +escalator +elevator +skilift +gondola +firefighter +newton +smoot +city +truck +everest +wall +fence +fort +trench +matrix +census +likert +sidemirror +wage +salary +fujita diff --git a/words/tails.txt b/words/tails.txt index 497533241..5b93bdd96 100644 --- a/words/tails.txt +++ b/words/tails.txt @@ -694,3 +694,29 @@ ussuri kitty tanuki neko +wind +airplane +time +gumiho +eel +moray +twin +hair +braid +gate 
+end +queue +miku +at +fin +solarflare +asymptote +reverse +bone +stern +quaver +note +mining +coat +follow +stalk From 52710945f524d96138c45d860139a544c39ee9d8 Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 12 Mar 2025 10:37:03 -0400 Subject: [PATCH 63/87] control/controlclient, ipn: add client audit logging (#14950) updates tailscale/corp#26435 Adds client support for sending audit logs to control via /machine/audit-log. Specifically implements audit logging for user initiated disconnections. This will require further work to optimize the peristant storage and exclusion via build tags for mobile: tailscale/corp#27011 tailscale/corp#27012 Signed-off-by: Jonathan Nobels --- cmd/k8s-operator/depaware.txt | 1 + cmd/tailscaled/depaware.txt | 1 + control/controlclient/auto.go | 7 + control/controlclient/controlclient_test.go | 41 ++ control/controlclient/direct.go | 50 +- control/controlclient/errors.go | 51 +++ ipn/auditlog/auditlog.go | 466 +++++++++++++++++++ ipn/auditlog/auditlog_test.go | 481 ++++++++++++++++++++ ipn/ipnauth/actor.go | 7 +- ipn/ipnauth/policy.go | 10 +- ipn/ipnlocal/local.go | 68 ++- tailcfg/tailcfg.go | 30 ++ tsd/tsd.go | 4 + 13 files changed, 1204 insertions(+), 13 deletions(-) create mode 100644 control/controlclient/errors.go create mode 100644 ipn/auditlog/auditlog.go create mode 100644 ipn/auditlog/auditlog_test.go diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 0a787a780..1c27fddea 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -814,6 +814,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/internal/client/tailscale from tailscale.com/cmd/k8s-operator tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/auditlog from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/conffile from tailscale.com/ipn/ipnlocal+ 💣 
tailscale.com/ipn/desktop from tailscale.com/ipn/ipnlocal+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index c0f592ea1..026758a47 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -271,6 +271,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/hostinfo from tailscale.com/client/web+ tailscale.com/internal/noiseconn from tailscale.com/control/controlclient tailscale.com/ipn from tailscale.com/client/local+ + tailscale.com/ipn/auditlog from tailscale.com/ipn/ipnlocal+ tailscale.com/ipn/conffile from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/desktop from tailscale.com/cmd/tailscaled+ 💣 tailscale.com/ipn/ipnauth from tailscale.com/ipn/ipnlocal+ diff --git a/control/controlclient/auto.go b/control/controlclient/auto.go index da123f8c4..e0168c19d 100644 --- a/control/controlclient/auto.go +++ b/control/controlclient/auto.go @@ -119,6 +119,7 @@ type Auto struct { updateCh chan struct{} // readable when we should inform the server of a change observer Observer // called to update Client status; always non-nil observerQueue execqueue.ExecQueue + shutdownFn func() // to be called prior to shutdown or nil unregisterHealthWatch func() @@ -189,6 +190,7 @@ func NewNoStart(opts Options) (_ *Auto, err error) { mapDone: make(chan struct{}), updateDone: make(chan struct{}), observer: opts.Observer, + shutdownFn: opts.Shutdown, } c.authCtx, c.authCancel = context.WithCancel(context.Background()) c.authCtx = sockstats.WithSockStats(c.authCtx, sockstats.LabelControlClientAuto, opts.Logf) @@ -755,6 +757,7 @@ func (c *Auto) Shutdown() { return } c.logf("client.Shutdown ...") + shutdownFn := c.shutdownFn direct := c.direct c.closed = true @@ -767,6 +770,10 @@ func (c *Auto) Shutdown() { c.unpauseWaiters = nil c.mu.Unlock() + if shutdownFn != nil { + shutdownFn() + } + c.unregisterHealthWatch() <-c.authDone <-c.mapDone 
diff --git a/control/controlclient/controlclient_test.go b/control/controlclient/controlclient_test.go index 6885b5851..f8882a4e7 100644 --- a/control/controlclient/controlclient_test.go +++ b/control/controlclient/controlclient_test.go @@ -4,6 +4,8 @@ package controlclient import ( + "errors" + "fmt" "io" "reflect" "slices" @@ -147,3 +149,42 @@ func TestCanSkipStatus(t *testing.T) { t.Errorf("Status fields = %q; this code was only written to handle fields %q", f, want) } } + +func TestRetryableErrors(t *testing.T) { + errorTests := []struct { + err error + want bool + }{ + {errNoNoiseClient, true}, + {errNoNodeKey, true}, + {fmt.Errorf("%w: %w", errNoNoiseClient, errors.New("no noise")), true}, + {fmt.Errorf("%w: %w", errHTTPPostFailure, errors.New("bad post")), true}, + {fmt.Errorf("%w: %w", errNoNodeKey, errors.New("not node key")), true}, + {errBadHTTPResponse(429, "too may requests"), true}, + {errBadHTTPResponse(500, "internal server eror"), true}, + {errBadHTTPResponse(502, "bad gateway"), true}, + {errBadHTTPResponse(503, "service unavailable"), true}, + {errBadHTTPResponse(504, "gateway timeout"), true}, + {errBadHTTPResponse(1234, "random error"), false}, + } + + for _, tt := range errorTests { + t.Run(tt.err.Error(), func(t *testing.T) { + if isRetryableErrorForTest(tt.err) != tt.want { + t.Fatalf("retriable: got %v, want %v", tt.err, tt.want) + } + }) + } +} + +type retryableForTest interface { + Retryable() bool +} + +func isRetryableErrorForTest(err error) bool { + var ae retryableForTest + if errors.As(err, &ae) { + return ae.Retryable() + } + return false +} diff --git a/control/controlclient/direct.go b/control/controlclient/direct.go index e7d1d25f8..68ab9ca17 100644 --- a/control/controlclient/direct.go +++ b/control/controlclient/direct.go @@ -156,6 +156,11 @@ type Options struct { // If we receive a new DialPlan from the server, this value will be // updated. 
DialPlan ControlDialPlanner + + // Shutdown is an optional function that will be called before client shutdown is + // attempted. It is used to allow the client to clean up any resources or complete any + // tasks that are dependent on a live client. + Shutdown func() } // ControlDialPlanner is the interface optionally supplied when creating a @@ -1662,11 +1667,11 @@ func (c *Auto) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) err func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) error { nc, err := c.getNoiseClient() if err != nil { - return err + return fmt.Errorf("%w: %w", errNoNoiseClient, err) } nodeKey, ok := c.GetPersist().PublicNodeKeyOK() if !ok { - return errors.New("no node key") + return errNoNodeKey } if c.panicOnUse { panic("tainted client") @@ -1697,6 +1702,47 @@ func (c *Direct) SetDeviceAttrs(ctx context.Context, attrs tailcfg.AttrUpdate) e return nil } +// SendAuditLog implements [auditlog.Transport] by sending an audit log synchronously to the control plane. +// +// See docs on [tailcfg.AuditLogRequest] and [auditlog.Logger] for background. 
+func (c *Auto) SendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequest) (err error) { + return c.direct.sendAuditLog(ctx, auditLog) +} + +func (c *Direct) sendAuditLog(ctx context.Context, auditLog tailcfg.AuditLogRequest) (err error) { + nc, err := c.getNoiseClient() + if err != nil { + return fmt.Errorf("%w: %w", errNoNoiseClient, err) + } + + nodeKey, ok := c.GetPersist().PublicNodeKeyOK() + if !ok { + return errNoNodeKey + } + + req := &tailcfg.AuditLogRequest{ + Version: tailcfg.CurrentCapabilityVersion, + NodeKey: nodeKey, + Action: auditLog.Action, + Details: auditLog.Details, + } + + if c.panicOnUse { + panic("tainted client") + } + + res, err := nc.post(ctx, "/machine/audit-log", nodeKey, req) + if err != nil { + return fmt.Errorf("%w: %w", errHTTPPostFailure, err) + } + defer res.Body.Close() + if res.StatusCode != 200 { + all, _ := io.ReadAll(res.Body) + return errBadHTTPResponse(res.StatusCode, string(all)) + } + return nil +} + func addLBHeader(req *http.Request, nodeKey key.NodePublic) { if !nodeKey.IsZero() { req.Header.Add(tailcfg.LBHeader, nodeKey.String()) diff --git a/control/controlclient/errors.go b/control/controlclient/errors.go new file mode 100644 index 000000000..9b4dab844 --- /dev/null +++ b/control/controlclient/errors.go @@ -0,0 +1,51 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package controlclient + +import ( + "errors" + "fmt" + "net/http" +) + +// apiResponseError is an error type that can be returned by controlclient +// api requests. +// +// It wraps an underlying error and a flag for clients to query if the +// error is retryable via the Retryable() method. +type apiResponseError struct { + err error + retryable bool +} + +// Error implements [error]. +func (e *apiResponseError) Error() string { + return e.err.Error() +} + +// Retryable reports whether the error is retryable. 
+func (e *apiResponseError) Retryable() bool { + return e.retryable +} + +func (e *apiResponseError) Unwrap() error { return e.err } + +var ( + errNoNodeKey = &apiResponseError{errors.New("no node key"), true} + errNoNoiseClient = &apiResponseError{errors.New("no noise client"), true} + errHTTPPostFailure = &apiResponseError{errors.New("http failure"), true} +) + +func errBadHTTPResponse(code int, msg string) error { + retryable := false + switch code { + case http.StatusTooManyRequests, + http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout: + retryable = true + } + return &apiResponseError{fmt.Errorf("http error %d: %s", code, msg), retryable} +} diff --git a/ipn/auditlog/auditlog.go b/ipn/auditlog/auditlog.go new file mode 100644 index 000000000..30f39211f --- /dev/null +++ b/ipn/auditlog/auditlog.go @@ -0,0 +1,466 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package auditlog provides a mechanism for logging audit events. +package auditlog + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sort" + "sync" + "time" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" + "tailscale.com/types/logger" + "tailscale.com/util/rands" + "tailscale.com/util/set" +) + +// transaction represents an audit log that has not yet been sent to the control plane. +type transaction struct { + // EventID is the unique identifier for the event being logged. + // This is used on the client side only and is not sent to control. + EventID string `json:",omitempty"` + // Retries is the number of times the logger has attempted to send this log. + // This is used on the client side only and is not sent to control. + Retries int `json:",omitempty"` + + // Action is the action to be logged. It must correspond to a known action in the control plane. + Action tailcfg.ClientAuditAction `json:",omitempty"` + // Details is an opaque string specific to the action being logged. 
Empty strings may not + // be valid depending on the action being logged. + Details string `json:",omitempty"` + // TimeStamp is the time at which the audit log was generated on the node. + TimeStamp time.Time `json:",omitzero"` +} + +// Transport provides a means for a client to send audit logs to a consumer (typically the control plane). +type Transport interface { + // SendAuditLog sends an audit log to a consumer of audit logs. + // Errors should be checked with [IsRetryableError] for retryability. + SendAuditLog(context.Context, tailcfg.AuditLogRequest) error +} + +// LogStore provides a means for a [Logger] to persist logs to disk or memory. +type LogStore interface { + // Save saves the given data to a persistent store. Save will overwrite existing data + // for the given key. + save(key ipn.ProfileID, txns []*transaction) error + + // Load retrieves the data from a persistent store. Returns a nil slice and + // no error if no data exists for the given key. + load(key ipn.ProfileID) ([]*transaction, error) +} + +// Opts contains the configuration options for a [Logger]. +type Opts struct { + // RetryLimit is the maximum number of attempts the logger will make to send a log before giving up. + RetryLimit int + // Store is the persistent store used to save logs to disk. Must be non-nil. + Store LogStore + // Logf is the logger used to log messages from the audit logger. Must be non-nil. + Logf logger.Logf +} + +// IsRetryableError returns true if the given error is retryable +// See [controlclient.apiResponseError]. Potentially retryable errors implement the Retryable() method. +func IsRetryableError(err error) bool { + var retryable interface{ Retryable() bool } + return errors.As(err, &retryable) && retryable.Retryable() +} + +type backoffOpts struct { + min, max time.Duration + multiplier float64 +} + +// .5, 1, 2, 4, 8, 10, 10, 10, 10, 10... 
+var defaultBackoffOpts = backoffOpts{ + min: time.Millisecond * 500, + max: 10 * time.Second, + multiplier: 2, +} + +// Logger provides a queue-based mechanism for submitting audit logs to the control plane - or +// another suitable consumer. Logs are stored to disk and retried until they are successfully sent, +// or until they permanently fail. +// +// Each individual profile/controlclient tuple should construct and manage a unique [Logger] instance. +type Logger struct { + logf logger.Logf + retryLimit int // the maximum number of attempts to send a log before giving up. + flusher chan struct{} // channel used to signal a flush operation. + done chan struct{} // closed when the flush worker exits. + ctx context.Context // canceled when the logger is stopped. + ctxCancel context.CancelFunc // cancels ctx. + backoffOpts // backoff settings for retry operations. + + // mu protects the fields below. + mu sync.Mutex + store LogStore // persistent storage for unsent logs. + profileID ipn.ProfileID // empty if [Logger.SetProfileID] has not been called. + transport Transport // nil until [Logger.Start] is called. +} + +// NewLogger creates a new [Logger] with the given options. +func NewLogger(opts Opts) *Logger { + ctx, cancel := context.WithCancel(context.Background()) + + al := &Logger{ + retryLimit: opts.RetryLimit, + logf: logger.WithPrefix(opts.Logf, "auditlog: "), + store: opts.Store, + flusher: make(chan struct{}, 1), + done: make(chan struct{}), + ctx: ctx, + ctxCancel: cancel, + backoffOpts: defaultBackoffOpts, + } + al.logf("created") + return al +} + +// FlushAndStop synchronously flushes all pending logs and stops the audit logger. +// This will block until a final flush operation completes or context is done. +// If the logger is already stopped, this will return immediately. All unsent +// logs will be persisted to the store. 
+func (al *Logger) FlushAndStop(ctx context.Context) { + al.stop() + al.flush(ctx) +} + +// SetProfileID sets the profileID for the logger. This must be called before any logs can be enqueued. +// The profileID of a logger cannot be changed once set. +func (al *Logger) SetProfileID(profileID ipn.ProfileID) error { + al.mu.Lock() + defer al.mu.Unlock() + if al.profileID != "" { + return errors.New("profileID already set") + } + + al.profileID = profileID + return nil +} + +// Start starts the audit logger with the given transport. +// It returns an error if the logger is already started. +func (al *Logger) Start(t Transport) error { + al.mu.Lock() + defer al.mu.Unlock() + + if al.transport != nil { + return errors.New("already started") + } + + al.transport = t + pending, err := al.storedCountLocked() + if err != nil { + al.logf("[unexpected] failed to restore logs: %v", err) + } + go al.flushWorker() + if pending > 0 { + al.flushAsync() + } + return nil +} + +// ErrAuditLogStorageFailure is returned when the logger fails to persist logs to the store. +var ErrAuditLogStorageFailure = errors.New("audit log storage failure") + +// Enqueue queues an audit log to be sent to the control plane (or another suitable consumer/transport). +// This will return an error if the underlying store fails to save the log or we fail to generate a unique +// eventID for the log. +func (al *Logger) Enqueue(action tailcfg.ClientAuditAction, details string) error { + txn := &transaction{ + Action: action, + Details: details, + TimeStamp: time.Now(), + } + // Generate a suitably random eventID for the transaction. + txn.EventID = fmt.Sprint(txn.TimeStamp, rands.HexString(16)) + return al.enqueue(txn) +} + +// flushAsync requests an asynchronous flush. +// It is a no-op if a flush is already pending. 
+func (al *Logger) flushAsync() { + select { + case al.flusher <- struct{}{}: + default: + } +} + +func (al *Logger) flushWorker() { + defer close(al.done) + + var retryDelay time.Duration + retry := time.NewTimer(0) + retry.Stop() + + for { + select { + case <-al.ctx.Done(): + return + case <-al.flusher: + err := al.flush(al.ctx) + switch { + case errors.Is(err, context.Canceled): + // The logger was stopped, no need to retry. + return + case err != nil: + retryDelay = max(al.backoffOpts.min, min(retryDelay*time.Duration(al.backoffOpts.multiplier), al.backoffOpts.max)) + al.logf("retrying after %v, %v", retryDelay, err) + retry.Reset(retryDelay) + default: + retryDelay = 0 + retry.Stop() + } + case <-retry.C: + al.flushAsync() + } + } +} + +// flush attempts to send all pending logs to the control plane. +// l.mu must not be held. +func (al *Logger) flush(ctx context.Context) error { + al.mu.Lock() + pending, err := al.store.load(al.profileID) + t := al.transport + al.mu.Unlock() + + if err != nil { + // This will catch nil profileIDs + return fmt.Errorf("failed to restore pending logs: %w", err) + } + if len(pending) == 0 { + return nil + } + if t == nil { + return errors.New("no transport") + } + + complete, unsent := al.sendToTransport(ctx, pending, t) + al.markTransactionsDone(complete) + + al.mu.Lock() + defer al.mu.Unlock() + if err = al.appendToStoreLocked(unsent); err != nil { + al.logf("[unexpected] failed to persist logs: %v", err) + } + + if len(unsent) != 0 { + return fmt.Errorf("failed to send %d logs", len(unsent)) + } + + if len(complete) != 0 { + al.logf("complete %d audit log transactions", len(complete)) + } + return nil +} + +// sendToTransport sends all pending logs to the control plane. Returns a pair of slices +// containing the logs that were successfully sent (or failed permanently) and those that were not. +// +// This may require multiple round trips to the control plane and can be a long running transaction. 
+func (al *Logger) sendToTransport(ctx context.Context, pending []*transaction, t Transport) (complete []*transaction, unsent []*transaction) { + for i, txn := range pending { + req := tailcfg.AuditLogRequest{ + Action: tailcfg.ClientAuditAction(txn.Action), + Details: txn.Details, + Timestamp: txn.TimeStamp, + } + + if err := t.SendAuditLog(ctx, req); err != nil { + switch { + case errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded): + // The contex is done. All further attempts will fail. + unsent = append(unsent, pending[i:]...) + return complete, unsent + case IsRetryableError(err) && txn.Retries+1 < al.retryLimit: + // We permit a maximum number of retries for each log. All retriable + // errors should be transient and we should be able to send the log eventually, but + // we don't want logs to be persisted indefinitely. + txn.Retries++ + unsent = append(unsent, txn) + default: + complete = append(complete, txn) + al.logf("failed permanently: %v", err) + } + } else { + // No error - we're done. + complete = append(complete, txn) + } + } + + return complete, unsent +} + +func (al *Logger) stop() { + al.mu.Lock() + t := al.transport + al.mu.Unlock() + + if t == nil { + // No transport means no worker goroutine and done will not be + // closed if we cancel the context. + return + } + + al.ctxCancel() + <-al.done + al.logf("stopped for profileID: %v", al.profileID) +} + +// appendToStoreLocked persists logs to the store. This will deduplicate +// logs so it is safe to call this with the same logs multiple time, to +// requeue failed transactions for example. +// +// l.mu must be held. +func (al *Logger) appendToStoreLocked(txns []*transaction) error { + if len(txns) == 0 { + return nil + } + + if al.profileID == "" { + return errors.New("no logId set") + } + + persisted, err := al.store.load(al.profileID) + if err != nil { + al.logf("[unexpected] append failed to restore logs: %v", err) + } + + // The order is important here. 
We want the latest transactions first, which will + // ensure when we dedup, the new transactions are seen and the older transactions + // are discarded. + txnsOut := append(txns, persisted...) + txnsOut = deduplicateAndSort(txnsOut) + + return al.store.save(al.profileID, txnsOut) +} + +// storedCountLocked returns the number of logs persisted to the store. +// al.mu must be held. +func (al *Logger) storedCountLocked() (int, error) { + persisted, err := al.store.load(al.profileID) + return len(persisted), err +} + +// markTransactionsDone removes logs from the store that are complete (sent or failed permanently). +// al.mu must not be held. +func (al *Logger) markTransactionsDone(sent []*transaction) { + al.mu.Lock() + defer al.mu.Unlock() + + ids := set.Set[string]{} + for _, txn := range sent { + ids.Add(txn.EventID) + } + + persisted, err := al.store.load(al.profileID) + if err != nil { + al.logf("[unexpected] markTransactionsDone failed to restore logs: %v", err) + } + var unsent []*transaction + for _, txn := range persisted { + if !ids.Contains(txn.EventID) { + unsent = append(unsent, txn) + } + } + al.store.save(al.profileID, unsent) +} + +// deduplicateAndSort removes duplicate logs from the given slice and sorts them by timestamp. +// The first log entry in the slice will be retained, subsequent logs with the same EventID will be discarded. +func deduplicateAndSort(txns []*transaction) []*transaction { + seen := set.Set[string]{} + deduped := make([]*transaction, 0, len(txns)) + for _, txn := range txns { + if !seen.Contains(txn.EventID) { + deduped = append(deduped, txn) + seen.Add(txn.EventID) + } + } + // Sort logs by timestamp - oldest to newest. This will put the oldest logs at + // the front of the queue. 
+ sort.Slice(deduped, func(i, j int) bool { + return deduped[i].TimeStamp.Before(deduped[j].TimeStamp) + }) + return deduped +} + +func (al *Logger) enqueue(txn *transaction) error { + al.mu.Lock() + defer al.mu.Unlock() + + if err := al.appendToStoreLocked([]*transaction{txn}); err != nil { + return fmt.Errorf("%w: %w", ErrAuditLogStorageFailure, err) + } + + // If a.transport is nil if the logger is stopped. + if al.transport != nil { + al.flushAsync() + } + + return nil +} + +var _ LogStore = (*logStateStore)(nil) + +// logStateStore is a concrete implementation of [LogStore] +// using [ipn.StateStore] as the underlying storage. +type logStateStore struct { + store ipn.StateStore +} + +// NewLogStore creates a new LogStateStore with the given [ipn.StateStore]. +func NewLogStore(store ipn.StateStore) LogStore { + return &logStateStore{ + store: store, + } +} + +func (s *logStateStore) generateKey(key ipn.ProfileID) string { + return "auditlog-" + string(key) +} + +// Save saves the given logs to an [ipn.StateStore]. This overwrites +// any existing entries for the given key. +func (s *logStateStore) save(key ipn.ProfileID, txns []*transaction) error { + if key == "" { + return errors.New("empty key") + } + + data, err := json.Marshal(txns) + if err != nil { + return err + } + k := ipn.StateKey(s.generateKey(key)) + return s.store.WriteState(k, data) +} + +// Load retrieves the logs from an [ipn.StateStore]. 
+func (s *logStateStore) load(key ipn.ProfileID) ([]*transaction, error) { + if key == "" { + return nil, errors.New("empty key") + } + + k := ipn.StateKey(s.generateKey(key)) + data, err := s.store.ReadState(k) + + switch { + case errors.Is(err, ipn.ErrStateNotExist): + return nil, nil + case err != nil: + return nil, err + } + + var txns []*transaction + err = json.Unmarshal(data, &txns) + return txns, err +} diff --git a/ipn/auditlog/auditlog_test.go b/ipn/auditlog/auditlog_test.go new file mode 100644 index 000000000..3d3bf95cb --- /dev/null +++ b/ipn/auditlog/auditlog_test.go @@ -0,0 +1,481 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package auditlog + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + qt "github.com/frankban/quicktest" + "tailscale.com/ipn/store/mem" + "tailscale.com/tailcfg" + "tailscale.com/tstest" +) + +// loggerForTest creates an auditLogger for you and cleans it up +// (and ensures no goroutines are leaked) when the test is done. +func loggerForTest(t *testing.T, opts Opts) *Logger { + t.Helper() + tstest.ResourceCheck(t) + + if opts.Logf == nil { + opts.Logf = t.Logf + } + + if opts.Store == nil { + t.Fatalf("opts.Store must be set") + } + + a := NewLogger(opts) + + t.Cleanup(func() { + a.FlushAndStop(context.Background()) + }) + return a +} + +func TestNonRetryableErrors(t *testing.T) { + errorTests := []struct { + desc string + err error + want bool + }{ + {"DeadlineExceeded", context.DeadlineExceeded, false}, + {"Canceled", context.Canceled, false}, + {"Canceled wrapped", fmt.Errorf("%w: %w", context.Canceled, errors.New("ctx cancelled")), false}, + {"Random error", errors.New("random error"), false}, + } + + for _, tt := range errorTests { + t.Run(tt.desc, func(t *testing.T) { + if IsRetryableError(tt.err) != tt.want { + t.Fatalf("retriable: got %v, want %v", !tt.want, tt.want) + } + }) + } +} + +// TestEnqueueAndFlush enqueues n logs and flushes them. 
+// We expect all logs to be flushed and for no +// logs to remain in the store once FlushAndStop returns. +func TestEnqueueAndFlush(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(nil) + al := loggerForTest(t, Opts{ + RetryLimit: 200, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + wantSent := 10 + + for i := range wantSent { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log %d", i)) + c.Assert(err, qt.IsNil) + } + + al.FlushAndStop(context.Background()) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent := mockTransport.sentCount(); gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueAndFlushWithFlushCancel calls FlushAndCancel with a cancelled +// context. We expect nothing to be sent and all logs to be stored. +func TestEnqueueAndFlushWithFlushCancel(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&retriableError) + al := loggerForTest(t, Opts{ + RetryLimit: 200, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + for i := range 10 { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log %d", i)) + c.Assert(err, qt.IsNil) + } + + // Cancel the context before calling FlushAndStop - nothing should get sent. + // This mimics a timeout before flush() has a chance to execute. 
+ ctx, cancel := context.WithCancel(context.Background()) + cancel() + + al.FlushAndStop(ctx) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 10; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 0; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestDeduplicateAndSort tests that the most recent log is kept when deduplicating logs +func TestDeduplicateAndSort(t *testing.T) { + c := qt.New(t) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + + logs := []*transaction{ + {EventID: "1", Details: "log 1", TimeStamp: time.Now().Add(-time.Minute * 1), Retries: 1}, + } + + al.mu.Lock() + defer al.mu.Unlock() + al.appendToStoreLocked(logs) + + // Update the transaction and re-append it + logs[0].Retries = 2 + al.appendToStoreLocked(logs) + + fromStore, err := al.store.load("test") + c.Assert(err, qt.IsNil) + + // We should see only one transaction + if wantStored, gotStored := len(logs), len(fromStore); gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + // We should see the latest transaction + if wantRetryCount, gotRetryCount := 2, fromStore[0].Retries; gotRetryCount != wantRetryCount { + t.Fatalf("reties: got %d, want %d", gotRetryCount, wantRetryCount) + } +} + +func TestChangeProfileId(t *testing.T) { + c := qt.New(t) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + c.Assert(al.SetProfileID("test"), qt.IsNil) + + // Changing a profile ID must fail + c.Assert(al.SetProfileID("test"), qt.IsNotNil) +} + +// TestSendOnRestore pushes a n logs to the persistent store, and ensures they +// are sent as soon as Start is called then checks to ensure the 
sent logs no +// longer exist in the store. +func TestSendOnRestore(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(nil) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + al.SetProfileID("test") + + wantTotal := 10 + + for range 10 { + al.Enqueue(tailcfg.AuditNodeDisconnect, "log") + } + + c.Assert(al.Start(mockTransport), qt.IsNil) + + al.FlushAndStop(context.Background()) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), wantTotal; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestFailureExhaustion enqueues n logs, with the transport in a failable state. +// We then set it to a non-failing state, call FlushAndStop and expect all logs to be sent. +func TestFailureExhaustion(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&retriableError) + + al := loggerForTest(t, Opts{ + RetryLimit: 1, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + for range 10 { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, "log") + c.Assert(err, qt.IsNil) + } + + al.FlushAndStop(context.Background()) + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 0; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueAndFailNoRetry enqueues a set of logs, all of which will fail and are not +// retriable. We then call FlushAndStop and expect all to be unsent. 
+func TestEnqueueAndFailNoRetry(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&nonRetriableError) + + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + for i := range 10 { + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log %d", i)) + c.Assert(err, qt.IsNil) + } + + al.FlushAndStop(context.Background()) + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 0; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueAndRetry enqueues a set of logs, all of which will fail and are retriable. +// Mid-test, we set the transport to not-fail and expect the queue to flush properly +// We set the backoff parameters to 0 seconds so retries are immediate. 
+func TestEnqueueAndRetry(t *testing.T) { + c := qt.New(t) + mockTransport := newMockTransport(&retriableError) + + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + al.backoffOpts = backoffOpts{ + min: 1 * time.Millisecond, + max: 4 * time.Millisecond, + multiplier: 2.0, + } + + c.Assert(al.SetProfileID("test"), qt.IsNil) + c.Assert(al.Start(mockTransport), qt.IsNil) + + err := al.Enqueue(tailcfg.AuditNodeDisconnect, fmt.Sprintf("log 1")) + c.Assert(err, qt.IsNil) + + // This will wait for at least 2 retries + gotRetried, wantRetried := mockTransport.waitForSendAttemptsToReach(3), true + if gotRetried != wantRetried { + t.Fatalf("retried: got %v, want %v", gotRetried, wantRetried) + } + + mockTransport.setErrorCondition(nil) + + al.FlushAndStop(context.Background()) + al.mu.Lock() + defer al.mu.Unlock() + + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } + + if gotSent, wantSent := mockTransport.sentCount(), 1; gotSent != wantSent { + t.Fatalf("sent: got %d, want %d", gotSent, wantSent) + } +} + +// TestEnqueueBeforeSetProfileID tests that logs enqueued before SetProfileId are not sent +func TestEnqueueBeforeSetProfileID(t *testing.T) { + c := qt.New(t) + al := loggerForTest(t, Opts{ + RetryLimit: 100, + Logf: t.Logf, + Store: NewLogStore(&mem.Store{}), + }) + + err := al.Enqueue(tailcfg.AuditNodeDisconnect, "log") + c.Assert(err, qt.IsNotNil) + al.FlushAndStop(context.Background()) + + al.mu.Lock() + defer al.mu.Unlock() + gotStored, err := al.storedCountLocked() + c.Assert(err, qt.IsNotNil) + + if wantStored := 0; gotStored != wantStored { + t.Fatalf("stored: got %d, want %d", gotStored, wantStored) + } +} + +// TestLogStoring tests that audit logs are persisted sorted by timestamp, oldest to newest +func TestLogSorting(t *testing.T) { + c := qt.New(t) + mockStore := 
NewLogStore(&mem.Store{}) + + logs := []*transaction{ + {EventID: "1", Details: "log 3", TimeStamp: time.Now().Add(-time.Minute * 1)}, + {EventID: "1", Details: "log 3", TimeStamp: time.Now().Add(-time.Minute * 2)}, + {EventID: "2", Details: "log 2", TimeStamp: time.Now().Add(-time.Minute * 3)}, + {EventID: "3", Details: "log 1", TimeStamp: time.Now().Add(-time.Minute * 4)}, + } + + wantLogs := []transaction{ + {Details: "log 1"}, + {Details: "log 2"}, + {Details: "log 3"}, + } + + mockStore.save("test", logs) + + gotLogs, err := mockStore.load("test") + c.Assert(err, qt.IsNil) + gotLogs = deduplicateAndSort(gotLogs) + + for i := range gotLogs { + if want, got := wantLogs[i].Details, gotLogs[i].Details; want != got { + t.Fatalf("Details: got %v, want %v", got, want) + } + } +} + +// mock implementations for testing + +// newMockTransport returns a mock transport for testing +// If err is no nil, SendAuditLog will return this error if the send is attempted +// before the context is cancelled. 
+func newMockTransport(err error) *mockAuditLogTransport { + return &mockAuditLogTransport{ + err: err, + attempts: make(chan int, 1), + } +} + +type mockAuditLogTransport struct { + attempts chan int // channel to notify of send attempts + + mu sync.Mutex + sendAttmpts int // number of attempts to send logs + sendCount int // number of logs sent by the transport + err error // error to return when sending logs +} + +// waitForSendAttemptsToReach blocks until the number of send attempts reaches n +// This should be use only in tests where the transport is expected to retry sending logs +func (t *mockAuditLogTransport) waitForSendAttemptsToReach(n int) bool { + for attempts := range t.attempts { + if attempts >= n { + return true + } + } + return false +} + +func (t *mockAuditLogTransport) setErrorCondition(err error) { + t.mu.Lock() + defer t.mu.Unlock() + t.err = err +} + +func (t *mockAuditLogTransport) sentCount() int { + t.mu.Lock() + defer t.mu.Unlock() + return t.sendCount +} + +func (t *mockAuditLogTransport) SendAuditLog(ctx context.Context, _ tailcfg.AuditLogRequest) (err error) { + t.mu.Lock() + t.sendAttmpts += 1 + defer func() { + a := t.sendAttmpts + t.mu.Unlock() + select { + case t.attempts <- a: + default: + } + }() + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if t.err != nil { + return t.err + } + t.sendCount += 1 + return nil +} + +var ( + retriableError = mockError{errors.New("retriable error")} + nonRetriableError = mockError{errors.New("permanent failure error")} +) + +type mockError struct { + error +} + +func (e mockError) Retryable() bool { + return e == retriableError +} diff --git a/ipn/ipnauth/actor.go b/ipn/ipnauth/actor.go index 8a0e77645..108bdd341 100644 --- a/ipn/ipnauth/actor.go +++ b/ipn/ipnauth/actor.go @@ -10,12 +10,11 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/tailcfg" ) // AuditLogFunc is any function that can be used to log audit actions performed by an 
[Actor]. -// -// TODO(nickkhyl,barnstar): define a named string type for the action (in tailcfg?) and use it here. -type AuditLogFunc func(action, details string) +type AuditLogFunc func(action tailcfg.ClientAuditAction, details string) error // Actor is any actor using the [ipnlocal.LocalBackend]. // @@ -45,7 +44,7 @@ type Actor interface { // // If the auditLogger is non-nil, it is used to write details about the action // to the audit log when required by the policy. - CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess, auditLogger AuditLogFunc) error + CheckProfileAccess(profile ipn.LoginProfileView, requestedAccess ProfileAccess, auditLogFn AuditLogFunc) error // IsLocalSystem reports whether the actor is the Windows' Local System account. // diff --git a/ipn/ipnauth/policy.go b/ipn/ipnauth/policy.go index f09be0fcb..aa4ec4100 100644 --- a/ipn/ipnauth/policy.go +++ b/ipn/ipnauth/policy.go @@ -9,6 +9,7 @@ import ( "tailscale.com/client/tailscale/apitype" "tailscale.com/ipn" + "tailscale.com/tailcfg" "tailscale.com/util/syspolicy" ) @@ -48,7 +49,7 @@ func (a actorWithPolicyChecks) CheckProfileAccess(profile ipn.LoginProfileView, // // TODO(nickkhyl): unexport it when we move [ipn.Actor] implementations from [ipnserver] // and corp to this package. 
-func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditLogger AuditLogFunc) error { +func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason string, auditFn AuditLogFunc) error { if alwaysOn, _ := syspolicy.GetBoolean(syspolicy.AlwaysOn, false); !alwaysOn { return nil } @@ -58,15 +59,16 @@ func CheckDisconnectPolicy(actor Actor, profile ipn.LoginProfileView, reason str if reason == "" { return errors.New("disconnect not allowed: reason required") } - if auditLogger != nil { + if auditFn != nil { var details string if username, _ := actor.Username(); username != "" { // best-effort; we don't have it on all platforms details = fmt.Sprintf("%q is being disconnected by %q: %v", profile.Name(), username, reason) } else { details = fmt.Sprintf("%q is being disconnected: %v", profile.Name(), reason) } - // TODO(nickkhyl,barnstar): use a const for DISCONNECT_NODE. - auditLogger("DISCONNECT_NODE", details) + if err := auditFn(tailcfg.AuditNodeDisconnect, details); err != nil { + return err + } } return nil } diff --git a/ipn/ipnlocal/local.go b/ipn/ipnlocal/local.go index e9f263996..f866527d1 100644 --- a/ipn/ipnlocal/local.go +++ b/ipn/ipnlocal/local.go @@ -57,10 +57,12 @@ import ( "tailscale.com/health/healthmsg" "tailscale.com/hostinfo" "tailscale.com/ipn" + "tailscale.com/ipn/auditlog" "tailscale.com/ipn/conffile" "tailscale.com/ipn/ipnauth" "tailscale.com/ipn/ipnstate" "tailscale.com/ipn/policy" + memstore "tailscale.com/ipn/store/mem" "tailscale.com/log/sockstatlog" "tailscale.com/logpolicy" "tailscale.com/net/captivedetection" @@ -450,6 +452,12 @@ type LocalBackend struct { // Each callback is called exactly once in unspecified order and without b.mu held. // Returned errors are logged but otherwise ignored and do not affect the shutdown process. shutdownCbs set.HandleSet[func() error] + + // auditLogger, if non-nil, manages audit logging for the backend. 
+ // + // It queues, persists, and sends audit logs + // to the control client. auditLogger has the same lifespan as b.cc. + auditLogger *auditlog.Logger } // HealthTracker returns the health tracker for the backend. @@ -1679,6 +1687,15 @@ func (b *LocalBackend) SetControlClientStatus(c controlclient.Client, st control b.logf("Failed to save new controlclient state: %v", err) } } + + // Update the audit logger with the current profile ID. + if b.auditLogger != nil && prefsChanged { + pid := b.pm.CurrentProfile().ID() + if err := b.auditLogger.SetProfileID(pid); err != nil { + b.logf("Failed to set profile ID in audit logger: %v", err) + } + } + // initTKALocked is dependent on CurrentProfile.ID, which is initialized // (for new profiles) on the first call to b.pm.SetPrefs. if err := b.initTKALocked(); err != nil { @@ -2386,6 +2403,27 @@ func (b *LocalBackend) Start(opts ipn.Options) error { debugFlags = append([]string{"netstack"}, debugFlags...) } + var auditLogShutdown func() + // Audit logging is only available if the client has set up a proper persistent + // store for the logs in sys. + store, ok := b.sys.AuditLogStore.GetOK() + if !ok { + b.logf("auditlog: [unexpected] no persistent audit log storage configured. using memory store.") + store = auditlog.NewLogStore(&memstore.Store{}) + } + + al := auditlog.NewLogger(auditlog.Opts{ + Logf: b.logf, + RetryLimit: 32, + Store: store, + }) + b.auditLogger = al + auditLogShutdown = func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + al.FlushAndStop(ctx) + } + // TODO(apenwarr): The only way to change the ServerURL is to // re-run b.Start, because this is the only place we create a // new controlclient. 
EditPrefs allows you to overwrite ServerURL, @@ -2411,6 +2449,7 @@ func (b *LocalBackend) Start(opts ipn.Options) error { C2NHandler: http.HandlerFunc(b.handleC2N), DialPlan: &b.dialPlan, // pointer because it can't be copied ControlKnobs: b.sys.ControlKnobs(), + Shutdown: auditLogShutdown, // Don't warn about broken Linux IP forwarding when // netstack is being used. @@ -4263,6 +4302,21 @@ func (b *LocalBackend) MaybeClearAppConnector(mp *ipn.MaskedPrefs) error { return err } +var errNoAuditLogger = errors.New("no audit logger configured") + +func (b *LocalBackend) getAuditLoggerLocked() ipnauth.AuditLogFunc { + logger := b.auditLogger + return func(action tailcfg.ClientAuditAction, details string) error { + if logger == nil { + return errNoAuditLogger + } + if err := logger.Enqueue(action, details); err != nil { + return fmt.Errorf("failed to enqueue audit log %v %q: %w", action, details, err) + } + return nil + } +} + // EditPrefs applies the changes in mp to the current prefs, // acting as the tailscaled itself rather than a specific user. func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) { @@ -4288,9 +4342,8 @@ func (b *LocalBackend) EditPrefsAs(mp *ipn.MaskedPrefs, actor ipnauth.Actor) (ip unlock := b.lockAndGetUnlock() defer unlock() if mp.WantRunningSet && !mp.WantRunning && b.pm.CurrentPrefs().WantRunning() { - // TODO(barnstar,nickkhyl): replace loggerFn with the actual audit logger. 
- loggerFn := func(action, details string) { b.logf("[audit]: %s: %s", action, details) } - if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, loggerFn); err != nil { + if err := actor.CheckProfileAccess(b.pm.CurrentProfile(), ipnauth.Disconnect, b.getAuditLoggerLocked()); err != nil { + b.logf("check profile access failed: %v", err) return ipn.PrefsView{}, err } @@ -5874,6 +5927,15 @@ func (b *LocalBackend) requestEngineStatusAndWait() { func (b *LocalBackend) setControlClientLocked(cc controlclient.Client) { b.cc = cc b.ccAuto, _ = cc.(*controlclient.Auto) + if b.auditLogger != nil { + if err := b.auditLogger.SetProfileID(b.pm.CurrentProfile().ID()); err != nil { + b.logf("audit logger set profile ID failure: %v", err) + } + + if err := b.auditLogger.Start(b.ccAuto); err != nil { + b.logf("audit logger start failure: %v", err) + } + } } // resetControlClientLocked sets b.cc to nil and returns the old value. If the diff --git a/tailcfg/tailcfg.go b/tailcfg/tailcfg.go index 7556ba3d0..83fab9c97 100644 --- a/tailcfg/tailcfg.go +++ b/tailcfg/tailcfg.go @@ -2982,3 +2982,33 @@ const LBHeader = "Ts-Lb" // correspond to those IPs. Any services that don't correspond to a service // this client is hosting can be ignored. type ServiceIPMappings map[ServiceName][]netip.Addr + +// ClientAuditAction represents an auditable action that a client can report to the +// control plane. These actions must correspond to the supported actions +// in the control plane. +type ClientAuditAction string + +const ( + // AuditNodeDisconnect action is sent when a node has disconnected + // from the control plane. The details must include a reason in the Details + // field, either generated, or entered by the user. + AuditNodeDisconnect = ClientAuditAction("DISCONNECT_NODE") +) + +// AuditLogRequest represents an audit log request to be sent to the control plane. 
+// +// This is JSON-encoded and sent over the control plane connection to: +// POST https:///machine/audit-log +type AuditLogRequest struct { + // Version is the client's current CapabilityVersion. + Version CapabilityVersion `json:",omitempty"` + // NodeKey is the client's current node key. + NodeKey key.NodePublic `json:",omitzero"` + // Action is the action to be logged. It must correspond to a known action in the control plane. + Action ClientAuditAction `json:",omitempty"` + // Details is an opaque string, specific to the action being logged. Empty strings may not + // be valid depending on the action being logged. + Details string `json:",omitempty"` + // Timestamp is the time at which the audit log was generated on the node. + Timestamp time.Time `json:",omitzero"` +} diff --git a/tsd/tsd.go b/tsd/tsd.go index 1d1f35017..9ab35af55 100644 --- a/tsd/tsd.go +++ b/tsd/tsd.go @@ -25,6 +25,7 @@ import ( "tailscale.com/drive" "tailscale.com/health" "tailscale.com/ipn" + "tailscale.com/ipn/auditlog" "tailscale.com/ipn/conffile" "tailscale.com/ipn/desktop" "tailscale.com/net/dns" @@ -50,6 +51,7 @@ type System struct { Router SubSystem[router.Router] Tun SubSystem[*tstun.Wrapper] StateStore SubSystem[ipn.StateStore] + AuditLogStore SubSystem[auditlog.LogStore] Netstack SubSystem[NetstackImpl] // actually a *netstack.Impl DriveForLocal SubSystem[drive.FileSystemForLocal] DriveForRemote SubSystem[drive.FileSystemForRemote] @@ -106,6 +108,8 @@ func (s *System) Set(v any) { s.MagicSock.Set(v) case ipn.StateStore: s.StateStore.Set(v) + case auditlog.LogStore: + s.AuditLogStore.Set(v) case NetstackImpl: s.Netstack.Set(v) case drive.FileSystemForLocal: From 640b2fa3aebc6abf5ba4efb01f053b290886991c Mon Sep 17 00:00:00 2001 From: Andrew Dunham Date: Wed, 12 Mar 2025 17:04:57 -0400 Subject: [PATCH 64/87] net/netmon, wgengine/magicsock: be quieter with portmapper logs This adds a new helper to the netmon package that allows us to rate-limit log messages, so that they only print 
once per (major) LinkChange event. We then use this when constructing the portmapper, so that we don't keep spamming logs forever on the same network. Updates #13145 Signed-off-by: Andrew Dunham Change-Id: I6e7162509148abea674f96efd76be9dffb373ae4 --- net/netmon/loghelper.go | 42 ++++++++++++++++++ net/netmon/loghelper_test.go | 78 +++++++++++++++++++++++++++++++++ wgengine/magicsock/magicsock.go | 12 ++++- 3 files changed, 131 insertions(+), 1 deletion(-) create mode 100644 net/netmon/loghelper.go create mode 100644 net/netmon/loghelper_test.go diff --git a/net/netmon/loghelper.go b/net/netmon/loghelper.go new file mode 100644 index 000000000..824faeef0 --- /dev/null +++ b/net/netmon/loghelper.go @@ -0,0 +1,42 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netmon + +import ( + "sync" + + "tailscale.com/types/logger" +) + +// LinkChangeLogLimiter returns a new [logger.Logf] that logs each unique +// format string to the underlying logger only once per major LinkChange event. +// +// The returned function should be called when the logger is no longer needed, +// to release resources from the Monitor. +func LinkChangeLogLimiter(logf logger.Logf, nm *Monitor) (_ logger.Logf, unregister func()) { + var formatSeen sync.Map // map[string]bool + unregister = nm.RegisterChangeCallback(func(cd *ChangeDelta) { + // If we're in a major change or a time jump, clear the seen map. + if cd.Major || cd.TimeJumped { + formatSeen.Clear() + } + }) + + return func(format string, args ...any) { + // We only store 'true' in the map, so if it's present then it + // means we've already logged this format string. + _, loaded := formatSeen.LoadOrStore(format, true) + if loaded { + // TODO(andrew-d): we may still want to log this + // message every N minutes (1x/hour?) even if it's been + // seen, so that debugging doesn't require searching + // back in the logs for an unbounded amount of time. 
+ // + // See: https://github.com/tailscale/tailscale/issues/13145 + return + } + + logf(format, args...) + }, unregister +} diff --git a/net/netmon/loghelper_test.go b/net/netmon/loghelper_test.go new file mode 100644 index 000000000..31777f4bc --- /dev/null +++ b/net/netmon/loghelper_test.go @@ -0,0 +1,78 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package netmon + +import ( + "bytes" + "fmt" + "testing" +) + +func TestLinkChangeLogLimiter(t *testing.T) { + mon, err := New(t.Logf) + if err != nil { + t.Fatal(err) + } + defer mon.Close() + + var logBuffer bytes.Buffer + logf := func(format string, args ...any) { + t.Logf("captured log: "+format, args...) + + if format[len(format)-1] != '\n' { + format += "\n" + } + fmt.Fprintf(&logBuffer, format, args...) + } + + logf, unregister := LinkChangeLogLimiter(logf, mon) + defer unregister() + + // Log once, which should write to our log buffer. + logf("hello %s", "world") + if got := logBuffer.String(); got != "hello world\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Log again, which should not write to our log buffer. + logf("hello %s", "andrew") + if got := logBuffer.String(); got != "hello world\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Log a different message, which should write to our log buffer. + logf("other message") + if got := logBuffer.String(); got != "hello world\nother message\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Synthesize a fake major change event, which should clear the format + // string cache and allow the next log to write to our log buffer. + // + // InjectEvent doesn't work because it's not a major event, so we + // instead reach into the netmon and grab the callback, and then call + // it ourselves. 
+ mon.mu.Lock() + var cb func(*ChangeDelta) + for _, c := range mon.cbs { + cb = c + break + } + mon.mu.Unlock() + + cb(&ChangeDelta{Major: true}) + + logf("hello %s", "world") + if got := logBuffer.String(); got != "hello world\nother message\nhello world\n" { + t.Errorf("unexpected log buffer contents: %q", got) + } + + // Unregistering the callback should clear our 'cbs' set. + unregister() + mon.mu.Lock() + if len(mon.cbs) != 0 { + t.Errorf("expected no callbacks, got %v", mon.cbs) + } + mon.mu.Unlock() +} diff --git a/wgengine/magicsock/magicsock.go b/wgengine/magicsock/magicsock.go index acf7114e1..e8e966582 100644 --- a/wgengine/magicsock/magicsock.go +++ b/wgengine/magicsock/magicsock.go @@ -177,6 +177,10 @@ type Conn struct { // port mappings from NAT devices. portMapper *portmapper.Client + // portMapperLogfUnregister is the function to call to unregister + // the portmapper log limiter. + portMapperLogfUnregister func() + // derpRecvCh is used by receiveDERP to read DERP messages. // It must have buffer size > 0; see issue 3736. derpRecvCh chan derpReadResult @@ -532,10 +536,15 @@ func NewConn(opts Options) (*Conn, error) { c.idleFunc = opts.IdleFunc c.testOnlyPacketListener = opts.TestOnlyPacketListener c.noteRecvActivity = opts.NoteRecvActivity + + // Don't log the same log messages possibly every few seconds in our + // portmapper. 
+ portmapperLogf := logger.WithPrefix(c.logf, "portmapper: ") + portmapperLogf, c.portMapperLogfUnregister = netmon.LinkChangeLogLimiter(portmapperLogf, opts.NetMon) portMapOpts := &portmapper.DebugKnobs{ DisableAll: func() bool { return opts.DisablePortMapper || c.onlyTCP443.Load() }, } - c.portMapper = portmapper.NewClient(logger.WithPrefix(c.logf, "portmapper: "), opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged) + c.portMapper = portmapper.NewClient(portmapperLogf, opts.NetMon, portMapOpts, opts.ControlKnobs, c.onPortMapChanged) c.portMapper.SetGatewayLookupFunc(opts.NetMon.GatewayAndSelfIP) c.netMon = opts.NetMon c.health = opts.HealthTracker @@ -2481,6 +2490,7 @@ func (c *Conn) Close() error { } c.stopPeriodicReSTUNTimerLocked() c.portMapper.Close() + c.portMapperLogfUnregister() c.peerMap.forEachEndpoint(func(ep *endpoint) { ep.stopAndReset() From d83024a63fe6f7ef6836ece13f13cf748014ebb9 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 08:18:33 -0800 Subject: [PATCH 65/87] util/eventbus: add a debug HTTP handler for the bus Updates #15160 Signed-off-by: David Anderson --- util/eventbus/assets/event.html | 6 + util/eventbus/assets/htmx-websocket.min.js.gz | Bin 0 -> 4249 bytes util/eventbus/assets/htmx.min.js.gz | Bin 0 -> 16409 bytes util/eventbus/assets/main.html | 97 +++++++ util/eventbus/assets/monitor.html | 5 + util/eventbus/assets/style.css | 90 +++++++ util/eventbus/bus.go | 4 +- util/eventbus/debug.go | 11 +- util/eventbus/debughttp.go | 238 ++++++++++++++++++ util/eventbus/fetch-htmx.go | 93 +++++++ 10 files changed, 541 insertions(+), 3 deletions(-) create mode 100644 util/eventbus/assets/event.html create mode 100644 util/eventbus/assets/htmx-websocket.min.js.gz create mode 100644 util/eventbus/assets/htmx.min.js.gz create mode 100644 util/eventbus/assets/main.html create mode 100644 util/eventbus/assets/monitor.html create mode 100644 util/eventbus/assets/style.css create mode 100644 util/eventbus/debughttp.go 
create mode 100644 util/eventbus/fetch-htmx.go diff --git a/util/eventbus/assets/event.html b/util/eventbus/assets/event.html new file mode 100644 index 000000000..8e016f583 --- /dev/null +++ b/util/eventbus/assets/event.html @@ -0,0 +1,6 @@ +
  • +
    + {{.Count}}: {{.Type}} from {{.Event.From.Name}}, {{len .Event.To}} recipients + {{.Event.Event}} +
    +
  • diff --git a/util/eventbus/assets/htmx-websocket.min.js.gz b/util/eventbus/assets/htmx-websocket.min.js.gz new file mode 100644 index 0000000000000000000000000000000000000000..4ed53be492425280da60f662a48eebee9cc8e0b1 GIT binary patch literal 4249 zcmV;K5N7WmiwFP!00002|D-!zbK5wQ@A?&;JVaEip`5*{ebA0_<8d?_UnOzQ^2}V7 z&vgrtEeUH1U;xmH%A@~&g%5%NNh!`u9%374G#ZWmMsxCd@H;LSJbOSTVg6Mila#Z; z?8Lvp?S@Lg8Wu>NOORDra-m?w1q2ushmG2PO#v2&Fxl;P6TeVSb~4@OW=SQ<8i6vY zM3pH$nGRl027_c(G4ppaf~Nt%6X1As>&fRuDF082`aE6UIxH5u;Ya12j+-rzSs;Wbc36;KHoDGJPivkb?u+aLoK zpPx!tQ6UuwTvMq~V4l`UekzF|+h-9=N_vAUw{T*9Y7(a4SzsY7BMh+vSh?YQT3I zu1ilp9L!+2s*2)=e^aGU3_Vrm|8Yk=I5W(F4PkkK5=c=1>JbSgkrH!#7qO(&QqdL; zsRU)~12D41_MF+|0QjgP$#A;|!l4(BHC%IGE%_x8n#!4;Nfi-mRPe_i=C2$Tp+$;C zeFJhwRki_xDrDPa0=HtX&yy8gBB%|5w3li00SnxkgA*f*TmFc-Uz`z%Fw~r%E^);L zUKU7LRnG0A;dJ0ZEycC`z9WiQsZH0Yu7c6T9%>Y>)N&B6Om}?(X_cxEDm6z-Mgiy{ z_+6l?1PeC|m}0~kcDR(z>UFtN3Z9{sisO9wY(YNp9Al=exR{e{lYFUA>@SoObXh69 zX4(v2i^~lya)Hb-Wemfe{J{zKa5QcNB-4OxW+0Q5XjIVwg-jUzHA*D1{JY6mAj?mX z;&gIgh6d2sFIg5;U9;aeRG}*XRi_5XsQI=CZp6WOD6v-&n9|{LxgjU z>I1Gxw*S4p-(PP&Bv^R4(lT2Zh{a<58!0Mmq}KDujO$fn7;e7)u#-c37%jH(;#hqj z^^k;F&(T`T2L6g5F=_-eIR6Jw_|YLr3uJ4x`3KOmvylIO8<;uBJ9=O9WSXRBqe*iM znqC8RcYY7E*$jqW)S@eOjJBX}?gwq13a)T4@OJfeTM(RXNg&1@HRGHaE0m+8Yep32 zwYrU=l9a72HVxv&hZ0^kit!RieKYAGvbXPVKYnng>EMjq+#1N&@RL22ae>ATdm(A7 zkzZJIWio-QRY(tHoGC(?_HKGOII<;Cqh%Zl&ekXZzf!5>Sc9?}Va|bZ4?k)AV}vjD za`r+C<&x+7OS`oU$ZT%US-QX@PbTnBRL<&~P(Ug{=z61ICHS_*Wpm|TF?LeA@Zp$F zcp6%kYvAb)aLj;JMWG#7I5>mhFjgC<8}-ipI^3K-h$Z5ccE3=x1NHzM01Ju1Ya}ku^etNSe>L=_H|schc6W==!#+vzutW4awnxM$FJ9`WH=d)3|G!$ zrYwoX|0{{m(Llj7qD3s-aH+Jvhcih0po!%F6Nb|voIyM~)w~Y*pzkliqCXjx+W9jQv!^iD`>5 zWwk75W&*+$X>Rcuhtl-!sM`4CPxfA-57=Xj4KsW|uLNOo#l`l<%_jaV#&V3IV`L7a zki|N|De#J|xHFJj@_-9J*tJo5(Mpi@)<*U9l3(1A-{0?|FREUUW-df?u|PWvsHb5D z7ebJIs_lDsbrTNsMH{U>I`M|R2F{m@9VwH3r%>a#qxUvok~>oV!1;1G(#C(NM5SdZ z0&7?~>IpXj32=bhE#{OcEcQvWG*v`C$WZ|OE&0A!DHMTt_lWg(iE|z1)^akLz~`#v z({&ER&5#aPVD6o7R!POTp!L-H{l4s}k*OyA(Gg}yRVOerMVA~`@N{kJ0EdQ}W{HWT 
z1TRsjJv@1UI*{r>7$N)d$#cvm!ul#%J}=a?KH1^zp=RrUS7^hF2obBcOB4sK_F{j@ zE7sjB`Q|Pon7xHZPV&qio@_g!kh>m zx{>*}eRqRC%;n&PEZq95yPt8eS> z{_UYI@qjBQP2uU_QMB)H+6o1{1cVk>y8yMJGL=0Akzk@5aNE zVdvTb(E?c>MMGYg#=%g_aTt30!=H8l6kn_+qGm@}QmF}l_39af8&B6mcQ57NM|)If zI`m%gcid}ua)Bh@FSPZA7qeN!lFamluO7XV9vsDgMukpw2bF}E$jo^RBSs)O5RjCu3#<-`TsxeeRU7hjORFlx3jikw_2OK$ILYgY0vGJQ*d_$kOqN_b*1+_Kv+?7g$ z)M`T{ED@Qt4S5Qe0kAd!l*v+G&A*JG{|in42ueR4D z+n}tUWPLk@w%_#wx{eksikI$1Ie5Q2@qFhx8%{penWyI7*IZwc{8P`l&FWXLItX

    #5>|N2udB*J1?oKb5L)OY8MKTGhNR zKDR_4A=x0wwMhEq@9jXtjhgSK<;!z?6k!o4t3t_;h_X_~xCNMoC6{U#uQbttUg%Jy zb|A)1cAvFu>cC2aI#*7lq-&Nu9mamKZ98f0@U;{uCFSfl5*~COjVGN_fobWp9#kf|qFfE-Ogm%I)E7~zA@x(WeEWb3}hk*w-hT1Qk_Uspy)hgDV zpT_46!JU|dL-wM^VVvzWkg~OFD#S-M?iMD0=J|dK6|)Vr`H4E^bKYyaMdVHhjjfb> zMc1zU*l_aMDyp||Qv2$ax-cZZtw&#&w#omc4lW8kZo%78ze-DCe70*J?kU(ekE`u4 z;CQ~QJ+o~#vrWlJr0GcxyQUl8?n?o(<;1&hlywQs&H!JTIjW;yo#SrEBLXQ*VG$I0 zKn7*u6etXv9OACCKT!e*QvbR`t#M&5gFfh&@CPIT_|)!Z-u2wC-ou6Ehf}TAK2+A* z@m0NMW;(Df?VZ5b0wwK|#lho>kTh-FaSIfFEnx;9iQ1%F$^vX-_4Q1E@{}6=cD0G9mTt-l*Vm|wz^^p%6S~MTC`HpqHyhqKyCQ??)qa2aJ7O@*SD@d z)@oOft~g_AxTYz-K9$8<%ku5buXBqkr0L!Dd;#>VsUyvxSHG@B*5H!z`!E$f*tt1Ll z)Mto!okR_KKeebt(K4g4ocH$-xCFEkMVKIImj<)=WEy4+clRUT*q1ITBVvE6Eo|F7 z{_Lkhp6gW|mY;6D_u@{9!sJ5ImrJ!U8ZHU^Qla6nuc3T0-;P{;y8Q6#yQ@$CgxmLX z_XR`N#RE+dh`|mRvdvkM`=a_#0monD8q?5ng~zBWy=OtJ~Ur z{!&{KQb=AiLmuOXGcqL4wM05IS*J>h^QNO(Ywm_fPj@e!t|_1kS67ZgSEo0zkIvo# zH0{C*BqH0GyEUC};SFzz$~MPNWDsDXQp4s27MLk6E{Y-<-VNa_rWnp(ct3`_oG9|c z=~;6C{g?e^*i)Jfi#729%}mtl=H%W2^>m=&wk(K3xQ#{Z_U`@wM2L7axea=cR(11G vBy=^gL2j^g=XeZr0A8tOxBKGCr;Ow|A#b7B{w`9IyQ(18qC);S# z&_N?bHI-2@<*bo|AIX-px4fL?Y*W|iTJR}t8vNi)|DDhAhZNOfK?Q!4zn8c8YhJOL z`}Mr8@nc%^hBgvEuFHBuo7@hDrZU;$5Ez-l_ih)hc_X8w5L9ji8?X#(wW**heob}1 zv)M^qnlWOImmv9KQ3na-XaBRgDcAUGM&}iyr?--_rZRKI_Bx3*U)WjIeDR($YCr6H zOgWoZi+sDR8p*}oDJ!pPI?JWl&{U_Ll+z`>sQ#q+bxrlsTp znrd)Bl}W^vi4>$9+QcXo^p6c~q@SykBDDc@=I1~{ls2mSiY@#slNQ0cue-TAElSI} z<~OI;WsSpw&x@(3*7CAk*fG}?qnP@hvnkD^$Rw(%*|bthQC)2$Z7w;kNj{FIs^2J$ z8dWq(qnm0bmr)umX|-6&Xp-Jka(N=?j51l3wb=?;ttj8f{5ZX7&gqmhMyC>&IGS(j z`tbKkN-CoCrg>4ZQrunMt!W-z)%+?-XH}zs;DXk4D!Is`@$&XiCr;AAcvi~t&`XNa zW-54Hzo5(Vx>6M?*2t1cyX&v`bkkfwAw9qal>k;RHjSj>Ln+FY$*K8OS^regn@dr$ zrc&q5Olw8ulFypUWhp;4^rNVuVv~=f1(i`6&1g*}jV5VB>-jnEd~?R?JG~VP#s$5s z)Y4*uIPf=2a@FA(d%<}lv9QJ(7i&6gV5s`|;iTWNB^8w{)!t!Kf;MZ;8v43qGX*>O zcsu1Ynn&Lq{}tf~3%03FAAk2BlPuF&s!Nyi*Z*`m*g6%03oJE3ID1bUNoQyLN*$?2 zH?S^6L*Fn-g#t<#@NXKEQ304C`Yt=p{u;H#j~QZ6@_X@PrV%LlEFqoTQT2B|Cs*o+ 
zUJ9~Q9}0nzb5{JADYQv8`p5ISCbz|pnMR*vQT&)8Rw93@S{nV4A8a~)f3Mni$dCN2 zz76gpe=mN_)GCl~RM#EUM_w`8m`i-25)t*0pYW%`eB?JZO@DdFtoSkejnKm2^KChs zUE)+>6RF5AWO~JCcPE_5vSLkM({x(0=To&&wE$O=`!ak<@9zfXrq?k zr&2WeDN~8BX*r{!$>;F(hOIYJ4`-@VBo(ZzUzW1WD`*+e_nJ*OF6=A{+gzLY^b<1 z$maA`<|Rd>Oy8_lbXJv;)_3_2GHsT8Q_oJ+nXB`qG$-`*T6Ma#C-zhw6-=B{z>gL< zn*5b)^?nOtum)V;X|&#WC?t*6YH6d1F8!K_Y*nsfM+qcbyzc=!Mk6|keB6i$QJZAH z^NPh$I*5|OR36M!^=ckdLN*McdS==i*p#-Wav;h3@_nJoW=&m9X?*xyLP)e~qGT%x z0Q9Sxm(u1Rr-$DqNlWX74*Y75Rkep|Jx-EuAJhN1r{?O*HNX8o$56RbE@}0|s^)$9 zK9Pi=;1H440l+OqHObv%L^AAfb6IZ|Ss~;TD%s*wrVH_sJGTH4q zNfB&bG?Izetn3U6KX*I$;cB=FQ?<7UG?Gw-awwB}0%{)*U>cbGp3j1LUM$NQzd7ez zCZ<2Hq#-c9MPoF!L!0sGMs?zHSK-jr+NrixThD^8csu2*HE&=?l3cawTH)Br&9bU# zOozh*wWrCJlBu`1bhlI2jH^|tS}8M2A&6S@lqLE+ar};ujHvK7jgmsq|CP&g844f| z2$67Ze$0?;&Gv6&$ghUOxFUo^-$uJ#l{M?Sl5sSMlB{AgdV4mHS@M(|Cw6TF0P!owdMJ1-(|vq$a1aYp#WLN{m?kMp zk``t!4hYfhd&71eEJNm`|NO)>TPeIo8wLIPr@_6D)V%9S<9~cQ`gHvC)9rW1pX4X;iG7++zDTsa8wJbW=0=gXW`FqoGI}$3dG_vuLS4e1HKZ9{3BJ0J zqGAhm>LtCEM@zY?BX16G{bM4t>9VS4>TESxP1!;&pB^V$nw9G{WwRiEd^}0KF+C5* z6ernK&{9&phjG*ZD-k6{Lt~mbt|5RWnKeG5q!}w$G^NOzDu|MqnuQ{4C{uXK`zmBs zHg{}Ffb=Z<^R~y#sGgYmLGCZp?&GcZ4&DbA4&BrMQ03j4sRbh4CU3=O zo0hfyvyc-uVR`7#K!Zwc6}zRhFx5=}o<4?_}i^;doo_sn|IZuzOG>Y8FE@PP#fMeiLs0Bel-3kP7eMuN% zO;@alIts1th5uEBS)cOQ;KFzOq8bCpQ(F@f!rR*B}iV4#iV)>^iJzfMxHY_r!*_YMb}T$5%mh zDXLYRXd39~Q!_g9iC#@5t0mDB7*uS4)S6Ue_Sp^lAo!XJc^5N9K91Rh$T6GPb*8Sl z3%J6>o$38=7iaJBB96O=wFqc7+ro?3tTjbY%4?5ZzoddJg0siK-`QUp18 z*f7&}2}@@OqZ3ia;|B6kL5k|hj7DM1kLPa{YQ*!2T!LJx8+xP$QHtqOVU2^Me|$Rn z<|vc2k+DovN$D$9ag;HjY z9H`&?WBUy}hr}-|o3F^aAdcpMPOgyIqiKD3N=T4S}-P%?}3nDA~|htuZX}f$h9S z4&(kMhZfwTGrj0E0}|$K)d1ITKeBQ*Qy}4n@VyxVGKj%2#8(gF`&g)9h|Vw+3F10W zw%&)KBM=?=8F-x$LXK4hEanMIP&1(pq?^Rg0hs#pMGI z-y`do6hFksAp1=&Oq2fdQCxYDmwG2ur`Urz<60u8mTtF0cG{J1TP*+~bU575SRJS2 zzT2nn2ws}a6+(D?oSKDIcdmEY{(5f}sw*7cQ*%K>!U8X9HS;=_Y4k*~Q@^TTM^7U+ zA?fw-*beIOyAVzJH^@4csn8f&SOG|c`kFzMXh^||P*_U{|0_w=RhhJs+$lIS5}@&< z*rJH@SW|Y#MMzVLfTqHFH4$S9#9XZ~M;Z->61(O|y$Nq-ELA|j2pv6E-O7q>XltrI 
ziIS}xjVxyaiAT|J=sG@pJRFKK9X+0?yr-(d;USRp%$DizSkdK%vl$yHa-)7C9C1%9 zV2HehN^X|bT*e7={|suX zIWu~zSZ@1UAI^*Fs>Ul14x3D}{mi~0s8Ze#){3C+3?Mx?V4Xp%1--8LrZEFC-}ert zzd+xBOi6E0tL~uwyLV8AV1{lj^&AwzYa9yb8$HO@T(qkV6j@kCaE?8!V%al@C72#; zDUktvpJXZnk8-1-Gij0Dtngc1|J6Kh_TZ3`#wabUU|~w;u^B*8=oYdpdlyglNgah4 zt<1kwT=kkdG7srE3-j16OOn2Jz5*R^iw=0En3_^hbKQU7s6CFQ+FG?P*~_z&kMB<3 zU;g&t?BdPko3r=7eKymyIsk(+zxSmQP2##QA8c+9pCEaW12yyDa}leDa~|`iiR<*eU6#qlb*5MljL0Kj=nSy zZx~u5{)LfvteNT_dc{VsX|LRwZ{&I-eKqC{4RsCzzqL5t$O`4MYAjrgFR;wSm;DK% zppa<_)TcYo;zyTDI%xFy8bpsqZ1gA^a5ezx-vC>svjO7+Ek-`DrdZ=^8iKV5sMU*5 zwYQ-$k7a~Ino>mHkC6T+G+I>w(M}LV% zl*T-bs#%1Ijo(UvtPY7(&r!1Vl>U}7%A-UP1q})Lm#_p7*8#KQ=Z+q0fNltF`92T1 zL}R)`&aa}R=qX^#DZEk+WhlG{aE6Ejpi2Zh4%~&6sB*>gF$x3{FeF8wVCzZ=;-rvT zUxxw{L{JCkG;UG=AfoVGYX-DM`;-e~&XwxRho*C586#V0VTq?+u5NSAuTW>Cszs?t zwBo-|v;#JPSkX0}Z2tgu{Wlm#s&39IDK2T8v<;1`jMg%Q`>?y`b4i8ityQ;xuhIx4 zL|pIQ0|kmX^+*NV3E1Gg=V%}lrCY6X$$6Xfhd=JB(rLk6wIH5nu68UF_mexa=wDjDT)EdH#ELg zOF3S;vH3-Gbd9 zt)+ZE`@Nh}CKW1(qf!9`WwR)ap3JK2K{X?f?rZqb(>iTJv6ki%a8>})*VJJo_ z7Qhn-wDl1KiKvfy-_p2FX*z|~@6+Fot68w#z6}JJk-&PZ1sx7q57W)8NIiNKrO~HP zk0Q@VC6ndieYtW9*(irXdE}_Kd0Zz}=LyoLP^`>HYLMhnG!moe(F8|R4TseMAr?9s zB0cn!6mVl!Niof2*?iVKoJapbz9J&S<$CVRaRb31P2FX0O`((Gvqah^Dc;j~4nI76 z0rGwSAiNKibmMX(`J7KT%_Oog{Y{L*vu8Blm#~Mh%`&JQoVC(118>zxfVf4W3F|kj zFuTRFc_kVNpoH>4p7hU}hWdK0saBpbKBJd+YsD%7bY7nRa{1f)vzMpC;eO`H**oCL zRDQA*{!TqteFxf#H=5csk}f@?C)A3OB#GsdC;yoo%8|lmr8wa;`dr3rG)i{IJysMS zfR-~2f~L)pKKuGfm)A?G_ysg)Dl{>)W#kQw1yV=~UE(KXqEcpGSNb_1G%F)vW*KMN zx^{1feqKK{ ztGUjiL{D0d>BL6~R~;JQ3Sj=ErX|~~^}Q1%#Y?jc?k3eZw^t5-g&IOn+fI#;L@iRU z^_j({it>DM_VQQ92>oPgw`jcL5U>%@X&IhHv{MaG2bI;1lyQ|}D{z?2^(RQ>AqLGF z5e^bdBN=2sHWB=jq2YFqu`kF_WNYw|7!HLsN)$bFRu~RiG$J@bM9}Y*o`vkKEIqOW z3~TQr!~#{)FVyIg3?CUQzQ!#9WO{o3{qj=~m~_g$=xZ09m4b2~?`zMWd8f|r)Bsgs z&gImpI<3y)Uj5}yv}Ly#~oKa_`D&N;f{@Jr^%Lpu8~1D0=7y~Sn7zODPPsV z-?&b?7f9XH%aqjmHk&?lyHCGxyYtTNK26-Ux5v60VUP2FOSV6m<;0`>8}+Q1JC1V} zYJB?PPeTiB)hbJ3IT6Df`<#ePLo-p?y+cA%w9u7kx9ccki9w_x9}}|WYRZPHdu<`# 
z$gb#@cz^meynpLNmeP`co<#!U061>yV&KV^lWp5-4YRp8I2r(2ZPl|?+)dJA1&}&h zveqhT%Bifbja*dAR<B(>&D z+%k;_1-L>1gc!S6ZLu3A+rWaCXw?srh9AHtRGTI;ZR`ltF7|(R_QLG#hIom_-aU=n zX_n-;kWV(|oJn`iO0pTtiK&xE>33R%Ny_o@#M4T9BBH-IrJFBAdTfL$b5f2UPj)*L zQ|X;p64o`*{7W>#C0YXT&C_LCr0Z8{Z>VrbM2fXU!)mkD18a`Uf;qaiQ z_%j&}ss4$~v6;Zafp|wE(J8MtVokP+IHOEz*G_@l9DW^mzD>^#f~@?Yi~2kVQ4oe=wL(k#CNOqTfzvN+T)i(< zLx6EMHC^i-*P8EOoU|S#UOG`6M^Euul}Ycd;uq!(T84XO&ZbK)JeeSk@8Fc%EF%b{S?IzW zZ?9@T{hUVt15tWqeUi(Kr^kCHS<^HqVNLLS z`tWZ(B)vfmR2aP^VCxf5q;MNl4l`P4=pG*Uf&Jr80@97ogXMU z`qUh0Omj_+#-EyWV!pqp(hBd zj>iA^)YxLbs`}g5e*dVx+hlU&G@exYl7sQ2xL=x6&ExN>_D}+8&T|>FBth~`5T>jA zyjN!QSe0U&1bB8|yU8hQ*g?+J>lpX$YZyk>lb%`Aq{gwU=M4R~lJuCiZ!=Zxf zDD#wOOvWPRX@%!xd<^zo<47HQZGfeTm&XN?Jx0|$a8iM2RE?r!Ycq_00yy{GXXVup z)tfqcjp}luNVmg7qmMhbYICHdC6ObXW9tScMdkUca ze^F-*--szC(oop7yE%JwJk1@G7Ai=XNJQAi8l6h zd^~}pr>Ifg(7s?(NT*ta)8QKWl?Dw_w`D|);zyboI@oofC!q4LeOrD!x>7Nb>y1nY zioz=eEmLg`5jm_>$?i>JEe<)MQr#glzCDR~dShs}ni$Zu7G5gA)mk?Hj$O;x2N$!q!CA(K&6Q``Bb_ypwgHg*a?GG38KRz$7TLBcKDcb|l$ukS=%y zqBv5u0KrE#j!?b;{xw6xo9zpk*4u|S^9)Mw~ndscXm1=CRmSVb8wB{^Q4eQ?o zj*7LJ{zg+|O6-Z&p}D|L!R@QzS%$BU9gbfTqd}^7SHLdv^m=o(sOZw{A6$ z4ajl?S+#@>5z^{@F#(&lSWBCsM!Ow+&MP)EzqP$+6PRuKeHtbX`T(BI!;#I1M1yFw zY}>X3mrc@y)6sS^_b&z(@MhquE(HgY^MT5`$bF0X$A)emst@^kvBH{Q#(8hW@qBX6 zh-tIr_3YuAUVjQz(|!~!`De;rH7lUp*x#qvhk67p+4e~v2L-xdG1zQ|%R)48F zo^!Es2J>Tw?3&Hhs*+JV3CceLJUs?Cqno_WJVO+vmW}S`FDoUDm^4 zEDau07w~T9fBx3cdLAX1Xtimt&d%RO!=Z^$0EQ13Eh|crFdBlo+-6l-^M#`0jSV5} z6nc)QTx&yJ^UuNA8{*{@T-#OPYIVr`pyNW2>RA=`o`;Q;=qT(f>~=eje*9aG5;eD# zg3eoG`L3^lOvj6B;^>(=t()!PegeKn)}#mn0*0yafgP(n1x|_G^nT%|0UY(`g8Y= zUGi0yrL)kX9Wk7S>EV!mVZ^et#BlVcQq(`tJ9WZVRF*%`JKx1j?n`Yv%2KNrJ>O-4 zGBs62<3q<-Sx*4Xef3$>d5z!4Zt*tsNxo;P)-tZsX-d;gS~}XcqH&EEE3h*gAiE&e zQ(UL|%P=Y9deTA}P4XgUkE6P5Fs@isvB5k^w)4y;fy499wEX<8T(`3K2&zWB8riaJ z;!W6!x_07C>a~@$e)d9T!{rqFVNTjm`BgbCyxV_GL7!OlTF-B+$wp%lKsAqNYC&n@ zduNur9US`_*gq{Ee~ChFl+-TNJc4SWA&T^g7_up}13jD7boPo1D;e<9#*?;F%$}w? 
zd2$?kopj%SDN|I_@=IJ;R}q2I6`x!EDSVrKXsh6;|dZB-7kX zi_XpWxH0w@)(+ylgxR|x%lU4MxawQO*p zY6N!74>bC~s8k0tPmcXiQIwC<&>+eHZ8v#5X*)>>oR*Yz2hY0fG%06!)h0!??{DLu z(-uvMR#JyCQXjVxg>%l)M1qV!fK_ZJehss5x` zsL=rzugkhaJFg^kh$C(ECD3E*>X@N#$s>ha%9_p|O;W4)@5}9II8J5^Tt$Qe-WM5=mQ2S zea~k!N<-)k+e)_(R~N;bzBuq&I{Q+KE`rTCU|G$}nL7FA7gEQb3d_MgtR^ajdK5uC zHIaRIp!~V9T!Dhi%I&--#6@qoMu)NHJr8wm>-_?V9{%%v-19l8}6^2YR9advhE z*6*Ih;1|LJ*&sjKG>L(0kZF)D0H8(cPp!BTwEWz*Em%Zl9J|yDqm~PP=_RuZbs)2S zQ&G6f-TI-Cw_(z0Du7AH&@N!kB7hOU4!STGg103;TmBS?4wX`|1w!Z4)i=JN-X{BT zPy#8Cg~kKf;hJ2eBxkt%)lOMp%Zx!11RNyJa>WP1r$K0b8hjr16gi9@tP zHNSi)V!E2TO_#>dje?u{R1M@Qd||G#g?1JGbMhc}g6#%i!}k`C-Z2VC?Q2!=l3 z;?d~Q11%kXRZFVWWb(-K{5+G^3HTJlfoi25`V!c!IguH~$-*vr!7sG|CSh46sVF6V zPj6+MB(5FdJ6WE06|V~79*Xf2#-if$0rh+blI@uy^sH;8o=%T0^0l;P58dn7o#H4R zsGAwM&dH$k?C@Mt=}M@Nus~FJikprq{~dEo%R8pk_Kl)n%~%>^5*Vh&)_PV)HFMf^ zPI^3PUXR!)N(XZ;R_OpG2aiy4@Mw}oqX_0mZ>4W%s6QRy;b)qpXe6?EUCKCt5XzK1 zjnY9hq9!(Sp#a~=DE-5a(DPn)Ln&C#kb}^=SNM)13W(jO;u{ny;nt?pr*5v9 z&+^0JEv4se+gjUDg^;a+T?!im_Dx?HWO_~Rrj1}=vEMgKEtHAsmt z6x9c*z3$bKc#w#dmWU0Vm5lM(2j{wEco%fsdkGmXl>mnz12-F=Uva+65O&H6D>N^F z)@TQ4b0Z9W4;KW=PPMQ0+!S|46}qftO4M=rI>JBMBDvlb_=98_qMXY#QWy${pe4z} zVdD@@vV9Iwv{(}RWj`y3oSJMIpeHXsrs?)-b9H53!$CcpS`_{5$Mj4w2xBf=O>46ts0TC{ zd96nVlEWVLQz#psJo0Q|=GKOBiNB^a*3xO&LhN$ql@u(r4outw5T0L>Ue1Qb;8GIC znG|SM71dx7}fD0AEg&HU|(Q{(lY&o)gi5=CnfV!-NRiJ zLug6}gCmRu($2ZEq9FZl}49``oqEov-MMji?EFJA#cb1pR$%1rr8_ zwi{9BGBqI|SPILL3;ozq&RYe;T{V{;lp5vW)B2&%sY43FIn|3Nk{dVV(!i}B6LqEW{Y#)kVe zMeq!3VqkW7bq4_`x-#xQcQRci^3V>;tcRZGXy@aVkIrOGk{5Ef`!a&U+aF#9=tE17 zJhWtNmIu{7a`N&XdAHnGw|Reysy)pd_8df{TlMRH7v(jL(yf|nF3o#@r#0B-6dE(O zJs3?tBbewtPC_zdhqzz9-ZsYAsh!LMh?YXmPGwGU0xyq*ZyX@8Ot5DqUS@s#m@ z+ndfmyL8ZM5w`k+)NyKPpI?uAYsx2+zLzmig7D$7RZSBqHkoF8O<5d$e|j0E9PMNZz_hvp?dcM|{^ju8yw+$GNcWV};CXmX)nEG6 z^-eZIP$myQp1+OKed8h+EUS4r>!^g}vI4gol3*gPHRFb(}zX_?*5y=Uj)EY=CD7XMq5=%~&O zRRUL0uVTGR>p;=oS^el3X7FB~g52}p81m!f@G;U}^nq4Db`&&(KHYo1plq)21+1Hse%3z)p=crAW(Fzzp&`4Gq 
ziP+4D{VQ6}8Y;&oec~x5t(~c}c+@uPU`X6GJq=`m)v8?3Zla(K-77h-Y8pM%=X6P@ zpRf3B(5onC75B_oGblx8t-of@bC3CPhA`v=`1#prd7&Nw4OaT3q0GL_IjsP_mNdd|zE>E@%$6nYxj8n3i7aS|+oScP)rEd-|Q16t9Wn8#JhQ=;mh4Fk-ixreU9+!r zHZeW+GLmH5+^3^fXSgQx0cZzs;szSD&H8RYMi;K3+z~`)5zf zdTPHC?e5F^gJn6qMU>L!Hk^|_2Su3Mld( z@2BF5X)s#i)9w+*s?yqHb`ZWOK2~jh5PY?k(S(5ML*OA}ioC$jXrd2vk8kEuBhbA- z_v=!Wt7b3IJw&+kBGWrOXBBP|pH}X1z?9yLG2A^E1EDriDEHUIi!%}myWAT{7N>{X z96UV`U|{T^r(KM(?nSYJNcEt%V~*zQB{M~b%yc6WZTRf8ci?8hi_l-`tQfrhxaizO za<>zH+&0e~VQYg)(4K4%P|bU5tz zcpbo6sda9a{Dwr5uOt0Xnz=<^>DcXl(BOwc)7Wr?=A|jO%xbc^ee5c;b0@R7wl*oA z{fbu;rs>yp^Y&{0SAT6ON`|k(crCa@VyZuW?5^d$cDyK2UPE50Yw5d(Thi*hFdWg8 zxOwOYb*C;SoB_EF;n|w6V-o?w#b0>FwNYRmB@hv>5E=}~#{NXyNvhsaE%{AnOeJwU zqsLV4k7@qTjY)ypT>p4zJYWZR*t=FsJFZSC6GNy7qNb7M-fmD23NbEVtqoe^=p*~g z_zfGZ_^g^&R17LM&__QXM57warS{SM+Gn{e!hp*d-uHo5w}GiEg*;M}1HR3RFXm}7 z#o#E)rjDxWtL6G#S@s(37hs)V$(VJyEgROC6Ph8B0IDA&zL7NorSc#_guHaTcXor_VI;~F13o`8<+>wb?h zsDTmwZV^yn8NBpM?SjY2hQ=kx?!z1%qgtOCg6MEKjM;9-(LH*%V+FSY=QTO**1(sB zRVCq759+%exC8vAmdCzA#iAST%{b!)KL3%_|tIS__8W0zKTGNRpzj z>?k_?c>d<_`9|_nsO9_({o{1T3^!u6jvvq8TKmh7=Wh?-G-zfiy62T{zp}Fw>v^#I zAse8pa%S|PA4MIdXd>k|9ioEsV9!i+GRQz5Du&F^B7>~_~Q#R`h!tQzZmX=4#nau=ItjdWZ~<6VA0 zcRLdXL46H}2dXS4fZ+U^w&8HReESH*7X92sF0{wHNHwFZQt+1%a_9#vnzS<+ivlN%S5bfRq0H!>F;bL2m4JljB=u1Ube9CT|c5QqtU*RqNY zjE7j~MqzlP&@r1>w4*+Zl%TT*epVnrFt3o|>qw3}^qPcVb0BYvhQkBs(wo(bnqMVB zI~{qPDxX*th3icvyQ|NGu(u#dOlctB`5^f1T};#Mg38=~Id$)`P_|iJQ4yI{HcC^- zLvq;V8OGe?tAh~jKXDWs5-W1ysR_rAy{n!&%fA0sk}9McsPAH#mSO>5+o)ExG>NN= z=|NEs*P+*|mz`-Z1|OOq=& zw#~%}d8(hznG3!;Wl~hMF?vmRQ0=*qldl~XAIr%XN2VHVTld;vpbBQ;d;0pxQA2&$ zGtK&dgmoe0Epy?}n((l(@VK$QH-8a{N&`g9JdeRd2haefteJl>JY&}X~dOQvtxUr42?tKH7X4-RVZ1mAKRH;(?cTD2+*iMQQu zL*rA7iGX2i(ePnP$TiaD!bI?Zdd4DOha8ktyZYPrc$0!dfGpdyTFworsPm{P$!1?a z2w~$4ZhT89jZ2h{`4R8l+IR5CTRo-cUQruIubRiteFK*V0{Thv+`4(si7@j4^2)dc zza)W{NJm^JJs=-|)xiNxS`&etm?fL05F#Q4`$LvqQ*o8cRJ$TpYyr+dIZvsa=2coX z&4y|MFaIHhzo6s?Xk5DOF#}G&Pl$FR+5-aEizPt!*4-`tw3`dFjB902VF-g&nK6XO?cJYc7TddVq+fwL 
zyu4e}XhI@IxK_w?AKf0_+}s?324*8_ss(6Z7+~Zh$@Y#+X?&Nu6ureC$yD_&5w~8E zkLZ+YT)TcJA2VZ1g2V+1Sdwkc;@{Ihm`gD&{zU#@@hOblzV!ixf5JX)$dbj^+PdK( z&!6E?5^Fhh1Cjh9E*9YZ6FC1wDW&lgO)kvKQs``J_LONuI@T9{76*AjKw2L`&@;3N zG2{OYX#=C5hFP3Ck2t*3o6BV>KY}z6M&DReE0d?>ST}V-wz`%4w{+u`&QG1q&P}?u z>aYAci*J&2!_a;d!aZWCYcKIbQ2six60$oBu;v=qCPG)~O(mBng3c(DRarNAl}+8( zZg&xW+Szuhr8h-9#Gc56!Xmr*BqB^)#5PuG9>ygnFgSmbv<E>=7~ZuzTFO-&sr(KEk&|vpv9{sKPs}eh zfs2gs*9M_!b&D0-eI8V7fWpPQuGsFgHGlq<+>Jj^iW3&E)6eOzE`HK$vb|l3eC^JQ zbwyBcn4f5B*PUK1J^lqi8eH|ay2=NH@p;9{`rJ!8PmOv2OKj>o#mBGmv$j<$x96zl z6d01c1}CN0ndvX{TO(&IUMI!7SfsCEw_j(UDP6xYK0J-3KYx_*W%>ua-X;7H9`>lc z2D9BD?~I|N8#ZkiI2|L##r~4sqZ>4QVv>Q1*uGIwQ*AqsQDwvDjQfT|`-juE_5=p% z8Z&y%JZCc($%3}&>m(`UQt%sf55w9+mcRIVrHc6jE9K=il$oAhn^U941&ss!^KUl( z97yV(#?5K@*PGWZbQ|yBWOO9h~YySNPq~cnmQJwZuj@DRVE!r*u79rp_h?2es#j5kxaJ;rYC&0-T+aW8=DQ6 zlwz1rl>5Ox3a-ut%;>d2`F|s)&(t}{R|@5b^hRMoaCiJ$(qr`c zzH8#AXlOsko@YvRs^-30VZSMuqYardMM=>W;7YRD3 z((HB>a2^z)*?Dg?!>UA8J4=*{rVE zB6!^vd%m~qSc2;64OmBOhj1@ehf$t^iEXa6moDIyRYH34PU8r?iX#_p&|2XVEz{9X zyH=%&Py14T;a;*BEQzcmiNy`I6`EQ<^eUrrS~!_1QY!|b^8jU;>$FO=iVjgr14mgK zz2hjm51t$Bg+Lov782hr{MxECq!+4l=HZStS%P2-B`Ql$U;o z#pgN6{rzA|QBttsutU=B!w9i?z3LapmD2CWOfCmbUNzlqiR0rPM z1)2nd_m^Fvnyd_ZT$5E64zie&Ri=koEY)uhc4koAA8N5sNdZWh&8UkiL(fJsFRHBB zOsBMI=9`)v%+)|wnOiIO7`EIrgplw4@Bc}*HM|%1O2#}OVAtUNQnF^w#mWpjUZ+iJ zo>$(0<254O7KNJ&b*L7|uHAO|Vc%sSN|Nnu;4QFh>w6!*4xiEX9!K>^RHR&HWZD5Z zF^0Jb;K;*489MX& z0owpZXd#Gf6Z`x&ErOHxPzOQC949}x)<#yh&JnBU5e}Mq`6kMO)=kcX-7#0zo(w25 z*k^D7Sa<*>1}A(*2hqsj=20}53%(jeqsr$KEgK1V!O$}A7}>TioaHah+-O^|O7>Zx z;A=mAkoSDwRtXE~D4hK^Cur)jcTMb>n^}=VS~YK2T`{;V`$mSGO^P-hokHfdm9S{n zb?3g$^#jr_Dy(PO0|9H1L#m`Ybnn=hP86k~Rx_|`e*vS_oB$Mac^_4i#+sc4wh>IL zdTHoaYq&d7JkTV;z|)Fq_?>CxETG2y0Pru*-f2DLTM&r_ z&-MJyh<NIiEDDE_k*)9l*aV8hOp@(E@ojine@RPAhqrAGiC3{qRbo=3nwhem)`*+ zj|7H*>w5;w&y4!tY4FT%dc$qeP*%j;dksus4>0V3KWMZOgYTs&tL0Rjt@&Erko4JJ zN_JJTnGTe$y=(W%bRTO(IUB^i!8X(+lI_nj?ui@bse{a7M5(1UAjX1GFyoE6q;2BG n&tu7!VF0}1!N>DVC;Hu_HYv>Oe8-*v_K$6x6#qW}00960o7pi+ literal 0 
HcmV?d00001 diff --git a/util/eventbus/assets/main.html b/util/eventbus/assets/main.html new file mode 100644 index 000000000..51d6b22ad --- /dev/null +++ b/util/eventbus/assets/main.html @@ -0,0 +1,97 @@ + + + + + + + + +

    Event bus

    + +
    +

    General

    + {{with $.PublishQueue}} + {{len .}} pending + {{end}} + + +
    + +
    +

    Clients

    + + + + + + + + + + + {{range .Clients}} + + + + + + + {{end}} +
    NamePublishingSubscribingPending
    {{.Name}} +
      + {{range .Publish}} +
    • {{.}}
    • + {{end}} +
    +
    +
      + {{range .Subscribe}} +
    • {{.}}
    • + {{end}} +
    +
    + {{len ($.SubscribeQueue .Client)}} +
    +
    + +
    +

    Types

    + + {{range .Types}} + +
    +

    {{.Name}}

    +

    Definition

    + {{prettyPrintStruct .}} + +

    Published by:

    + {{if len (.Publish)}} +
      + {{range .Publish}} +
    • {{.Name}}
    • + {{end}} +
    + {{else}} +
      +
    • No publishers.
    • +
    + {{end}} + +

    Received by:

    + {{if len (.Subscribe)}} +
      + {{range .Subscribe}} +
    • {{.Name}}
    • + {{end}} +
    + {{else}} +
      +
    • No subscribers.
    • +
    + {{end}} +
    + {{end}} + +
    + + diff --git a/util/eventbus/assets/monitor.html b/util/eventbus/assets/monitor.html new file mode 100644 index 000000000..1af5bdce6 --- /dev/null +++ b/util/eventbus/assets/monitor.html @@ -0,0 +1,5 @@ +
    +
      +
    + +
    diff --git a/util/eventbus/assets/style.css b/util/eventbus/assets/style.css new file mode 100644 index 000000000..690bd4f17 --- /dev/null +++ b/util/eventbus/assets/style.css @@ -0,0 +1,90 @@ +/* CSS reset, thanks Josh Comeau: https://www.joshwcomeau.com/css/custom-css-reset/ */ +*, *::before, *::after { box-sizing: border-box; } +* { margin: 0; } +input, button, textarea, select { font: inherit; } +p, h1, h2, h3, h4, h5, h6 { overflow-wrap: break-word; } +p { text-wrap: pretty; } +h1, h2, h3, h4, h5, h6 { text-wrap: balance; } +#root, #__next { isolation: isolate; } +body { + line-height: 1.5; + -webkit-font-smoothing: antialiased; +} +img, picture, video, canvas, svg { + display: block; + max-width: 100%; +} + +/* Local styling begins */ + +body { + padding: 12px; +} + +div { + width: 100%; +} + +section { + display: flex; + flex-direction: column; + flex-gap: 6px; + align-items: flex-start; + padding: 12px 0; +} + +section > * { + margin-left: 24px; +} + +section > h2, section > h3 { + margin-left: 0; + padding-bottom: 6px; + padding-top: 12px; +} + +details { + padding-bottom: 12px; +} + +table { + table-layout: fixed; + width: calc(100% - 48px); + border-collapse: collapse; + border: 1px solid black; +} + +th, td { + padding: 12px; + border: 1px solid black; +} + +td.list { + vertical-align: top; +} + +ul { + list-style: none; +} + +td ul { + margin: 0; + padding: 0; +} + +code { + padding: 12px; + white-space: pre; +} + +#monitor { + width: calc(100% - 48px); + resize: vertical; + padding: 12px; + overflow: scroll; + height: 15lh; + border: 1px inset; + min-height: 1em; + display: flex; + flex-direction: column-reverse; +} diff --git a/util/eventbus/bus.go b/util/eventbus/bus.go index 96cafc98b..45d12da2f 100644 --- a/util/eventbus/bus.go +++ b/util/eventbus/bus.go @@ -73,8 +73,8 @@ func (b *Bus) Client(name string) *Client { } // Debugger returns the debugging facility for the bus. 
-func (b *Bus) Debugger() Debugger { - return Debugger{b} +func (b *Bus) Debugger() *Debugger { + return &Debugger{b} } // Close closes the bus. Implicitly closes all clients, publishers and diff --git a/util/eventbus/debug.go b/util/eventbus/debug.go index 31123e6ba..832d72ac0 100644 --- a/util/eventbus/debug.go +++ b/util/eventbus/debug.go @@ -4,11 +4,14 @@ package eventbus import ( + "cmp" "fmt" "reflect" "slices" "sync" "sync/atomic" + + "tailscale.com/tsweb" ) // A Debugger offers access to a bus's privileged introspection and @@ -29,7 +32,11 @@ type Debugger struct { // Clients returns a list of all clients attached to the bus. func (d *Debugger) Clients() []*Client { - return d.bus.listClients() + ret := d.bus.listClients() + slices.SortFunc(ret, func(a, b *Client) int { + return cmp.Compare(a.Name(), b.Name()) + }) + return ret } // PublishQueue returns the contents of the publish queue. @@ -130,6 +137,8 @@ func (d *Debugger) SubscribeTypes(client *Client) []reflect.Type { return client.subscribeTypes() } +func (d *Debugger) RegisterHTTP(td *tsweb.DebugHandler) { registerHTTPDebugger(d, td) } + // A hook collects hook functions that can be run as a group. 
type hook[T any] struct { sync.Mutex diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go new file mode 100644 index 000000000..bbd929efb --- /dev/null +++ b/util/eventbus/debughttp.go @@ -0,0 +1,238 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus + +import ( + "bytes" + "cmp" + "embed" + "fmt" + "html/template" + "io" + "io/fs" + "log" + "net/http" + "path/filepath" + "reflect" + "slices" + "strings" + "sync" + + "github.com/coder/websocket" + "tailscale.com/tsweb" +) + +type httpDebugger struct { + *Debugger +} + +func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { + dh := httpDebugger{d} + td.Handle("bus", "Event bus", dh) + td.HandleSilent("bus/monitor", http.HandlerFunc(dh.serveMonitor)) + td.HandleSilent("bus/style.css", serveStatic("style.css")) + td.HandleSilent("bus/htmx.min.js", serveStatic("htmx.min.js.gz")) + td.HandleSilent("bus/htmx-websocket.min.js", serveStatic("htmx-websocket.min.js.gz")) +} + +//go:embed assets/*.html +var templatesSrc embed.FS + +var templates = sync.OnceValue(func() *template.Template { + d, err := fs.Sub(templatesSrc, "assets") + if err != nil { + panic(fmt.Errorf("getting eventbus debughttp templates subdir: %w", err)) + } + ret := template.New("").Funcs(map[string]any{ + "prettyPrintStruct": prettyPrintStruct, + }) + return template.Must(ret.ParseFS(d, "*")) +}) + +//go:generate go run fetch-htmx.go + +//go:embed assets/*.css assets/*.min.js.gz +var static embed.FS + +func serveStatic(name string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch { + case strings.HasSuffix(name, ".css"): + w.Header().Set("Content-Type", "text/css") + case strings.HasSuffix(name, ".min.js.gz"): + w.Header().Set("Content-Type", "text/javascript") + w.Header().Set("Content-Encoding", "gzip") + case strings.HasSuffix(name, ".js"): + w.Header().Set("Content-Type", "text/javascript") + default: + http.Error(w, "not 
found", http.StatusNotFound) + return + } + + f, err := static.Open(filepath.Join("assets", name)) + if err != nil { + http.Error(w, fmt.Sprintf("opening asset: %v", err), http.StatusInternalServerError) + return + } + defer f.Close() + if _, err := io.Copy(w, f); err != nil { + http.Error(w, fmt.Sprintf("serving asset: %v", err), http.StatusInternalServerError) + return + } + }) +} + +func render(w http.ResponseWriter, name string, data any) { + err := templates().ExecuteTemplate(w, name+".html", data) + if err != nil { + err := fmt.Errorf("rendering template: %v", err) + log.Print(err) + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func (h httpDebugger) ServeHTTP(w http.ResponseWriter, r *http.Request) { + type clientInfo struct { + *Client + Publish []reflect.Type + Subscribe []reflect.Type + } + type typeInfo struct { + reflect.Type + Publish []*Client + Subscribe []*Client + } + type info struct { + *Debugger + Clients map[string]*clientInfo + Types map[string]*typeInfo + } + + data := info{ + Debugger: h.Debugger, + Clients: map[string]*clientInfo{}, + Types: map[string]*typeInfo{}, + } + + getTypeInfo := func(t reflect.Type) *typeInfo { + if data.Types[t.Name()] == nil { + data.Types[t.Name()] = &typeInfo{ + Type: t, + } + } + return data.Types[t.Name()] + } + + for _, c := range h.Clients() { + ci := &clientInfo{ + Client: c, + Publish: h.PublishTypes(c), + Subscribe: h.SubscribeTypes(c), + } + slices.SortFunc(ci.Publish, func(a, b reflect.Type) int { return cmp.Compare(a.Name(), b.Name()) }) + slices.SortFunc(ci.Subscribe, func(a, b reflect.Type) int { return cmp.Compare(a.Name(), b.Name()) }) + data.Clients[c.Name()] = ci + + for _, t := range ci.Publish { + ti := getTypeInfo(t) + ti.Publish = append(ti.Publish, c) + } + for _, t := range ci.Subscribe { + ti := getTypeInfo(t) + ti.Subscribe = append(ti.Subscribe, c) + } + } + + render(w, "main", data) +} + +func (h httpDebugger) serveMonitor(w http.ResponseWriter, r *http.Request) 
{ + if r.Header.Get("Upgrade") == "websocket" { + h.serveMonitorStream(w, r) + return + } + + render(w, "monitor", nil) +} + +func (h httpDebugger) serveMonitorStream(w http.ResponseWriter, r *http.Request) { + conn, err := websocket.Accept(w, r, nil) + if err != nil { + return + } + defer conn.CloseNow() + wsCtx := conn.CloseRead(r.Context()) + + mon := h.WatchBus() + defer mon.Close() + + i := 0 + for { + select { + case <-r.Context().Done(): + return + case <-wsCtx.Done(): + return + case <-mon.Done(): + return + case event := <-mon.Events(): + msg, err := conn.Writer(r.Context(), websocket.MessageText) + if err != nil { + return + } + data := map[string]any{ + "Count": i, + "Type": reflect.TypeOf(event.Event), + "Event": event, + } + i++ + if err := templates().ExecuteTemplate(msg, "event.html", data); err != nil { + log.Println(err) + return + } + if err := msg.Close(); err != nil { + return + } + } + } +} + +func prettyPrintStruct(t reflect.Type) string { + if t.Kind() != reflect.Struct { + return t.String() + } + var rec func(io.Writer, int, reflect.Type) + rec = func(out io.Writer, indent int, t reflect.Type) { + ind := strings.Repeat(" ", indent) + fmt.Fprintf(out, "%s", t.String()) + fs := collectFields(t) + if len(fs) > 0 { + io.WriteString(out, " {\n") + for _, f := range fs { + fmt.Fprintf(out, "%s %s ", ind, f.Name) + if f.Type.Kind() == reflect.Struct { + rec(out, indent+1, f.Type) + } else { + fmt.Fprint(out, f.Type) + } + io.WriteString(out, "\n") + } + fmt.Fprintf(out, "%s}", ind) + } + } + + var ret bytes.Buffer + rec(&ret, 0, t) + return ret.String() +} + +func collectFields(t reflect.Type) (ret []reflect.StructField) { + for _, f := range reflect.VisibleFields(t) { + if !f.IsExported() { + continue + } + ret = append(ret, f) + } + return ret +} diff --git a/util/eventbus/fetch-htmx.go b/util/eventbus/fetch-htmx.go new file mode 100644 index 000000000..f80d50257 --- /dev/null +++ b/util/eventbus/fetch-htmx.go @@ -0,0 +1,93 @@ +// Copyright (c) 
Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ignore + +// Program fetch-htmx fetches and installs local copies of the HTMX +// library and its dependencies, used by the debug UI. It is meant to +// be run via go generate. +package main + +import ( + "compress/gzip" + "crypto/sha512" + "encoding/base64" + "fmt" + "io" + "log" + "net/http" + "os" +) + +func main() { + // Hash from https://htmx.org/docs/#installing + htmx, err := fetchHashed("https://unpkg.com/htmx.org@2.0.4", "HGfztofotfshcF7+8n44JQL2oJmowVChPTg48S+jvZoztPfvwD79OC/LTtG6dMp+") + if err != nil { + log.Fatalf("fetching htmx: %v", err) + } + + // Hash SHOULD be from https://htmx.org/extensions/ws/ , but the + // hash is currently incorrect, see + // https://github.com/bigskysoftware/htmx-extensions/issues/153 + // + // Until that bug is resolved, hash was obtained by rebuilding the + // extension from git source, and verifying that the hash matches + // what unpkg is serving. + ws, err := fetchHashed("https://unpkg.com/htmx-ext-ws@2.0.2", "932iIqjARv+Gy0+r6RTGrfCkCKS5MsF539Iqf6Vt8L4YmbnnWI2DSFoMD90bvXd0") + if err != nil { + log.Fatalf("fetching htmx-websockets: %v", err) + } + + if err := writeGz("assets/htmx.min.js.gz", htmx); err != nil { + log.Fatalf("writing htmx.min.js.gz: %v", err) + } + if err := writeGz("assets/htmx-websocket.min.js.gz", ws); err != nil { + log.Fatalf("writing htmx-websocket.min.js.gz: %v", err) + } +} + +func writeGz(path string, bs []byte) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + + g, err := gzip.NewWriterLevel(f, gzip.BestCompression) + if err != nil { + return err + } + + if _, err := g.Write(bs); err != nil { + return err + } + + if err := g.Flush(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + return nil +} + +func fetchHashed(url, wantHash string) ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, err + } + defer 
resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("fetching %q returned error status: %s", url, resp.Status) + } + ret, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("reading file from %q: %v", url, err) + } + h := sha512.Sum384(ret) + got := base64.StdEncoding.EncodeToString(h[:]) + if got != wantHash { + return nil, fmt.Errorf("wrong hash for %q: got %q, want %q", url, got, wantHash) + } + return ret, nil +} From 6d217d81d166b1355f197f1feaba6f99598c82cc Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 09:49:09 -0800 Subject: [PATCH 66/87] util/eventbus: add a helper program for bus development The demo program generates a stream of made up bus events between a number of bus actors, as a way to generate some interesting activity to show on the bus debug page. Signed-off-by: David Anderson --- util/eventbus/debug-demo/main.go | 103 +++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) create mode 100644 util/eventbus/debug-demo/main.go diff --git a/util/eventbus/debug-demo/main.go b/util/eventbus/debug-demo/main.go new file mode 100644 index 000000000..a6d232d88 --- /dev/null +++ b/util/eventbus/debug-demo/main.go @@ -0,0 +1,103 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// debug-demo is a program that serves a bus's debug interface over +// HTTP, then generates some fake traffic from a handful of +// clients. It is an aid to development, to have something to present +// on the debug interfaces while writing them. 
+package main + +import ( + "log" + "math/rand/v2" + "net/http" + "net/netip" + "time" + + "tailscale.com/tsweb" + "tailscale.com/types/key" + "tailscale.com/util/eventbus" +) + +func main() { + b := eventbus.New() + c := b.Client("RouteMonitor") + go testPub[RouteAdded](c, 5*time.Second) + go testPub[RouteRemoved](c, 5*time.Second) + c = b.Client("ControlClient") + go testPub[PeerAdded](c, 3*time.Second) + go testPub[PeerRemoved](c, 6*time.Second) + c = b.Client("Portmapper") + go testPub[PortmapAcquired](c, 10*time.Second) + go testPub[PortmapLost](c, 15*time.Second) + go testSub[RouteAdded](c) + c = b.Client("WireguardConfig") + go testSub[PeerAdded](c) + go testSub[PeerRemoved](c) + c = b.Client("Magicsock") + go testPub[PeerPathChanged](c, 5*time.Second) + go testSub[RouteAdded](c) + go testSub[RouteRemoved](c) + go testSub[PortmapAcquired](c) + go testSub[PortmapLost](c) + + m := http.NewServeMux() + d := tsweb.Debugger(m) + b.Debugger().RegisterHTTP(d) + + m.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + http.Redirect(w, r, "/debug/bus", http.StatusFound) + }) + log.Printf("Serving debug interface at http://localhost:8185/debug/bus") + http.ListenAndServe(":8185", m) +} + +func testPub[T any](c *eventbus.Client, every time.Duration) { + p := eventbus.Publish[T](c) + for { + jitter := time.Duration(rand.N(2000)) * time.Millisecond + time.Sleep(jitter) + var zero T + log.Printf("%s publish: %T", c.Name(), zero) + p.Publish(zero) + time.Sleep(every) + } +} + +func testSub[T any](c *eventbus.Client) { + s := eventbus.Subscribe[T](c) + for v := range s.Events() { + log.Printf("%s received: %T", c.Name(), v) + } +} + +type RouteAdded struct { + Prefix netip.Prefix + Via netip.Addr + Priority int +} +type RouteRemoved struct { + Prefix netip.Addr +} + +type PeerAdded struct { + ID int + Key key.NodePublic +} +type PeerRemoved struct { + ID int + Key key.NodePublic +} + +type PortmapAcquired struct { + Endpoint netip.Addr +} +type PortmapLost 
struct { + Endpoint netip.Addr +} + +type PeerPathChanged struct { + ID int + EndpointID int + Quality int +} From 45ecc0f85a96d09b4a0ca9839b2598314ad7ac34 Mon Sep 17 00:00:00 2001 From: Will Norris Date: Wed, 12 Mar 2025 15:00:26 -0700 Subject: [PATCH 67/87] tsweb: add title to DebugHandler and helper registration methods Allow customizing the title on the debug index page. Also add methods for registering http.HandlerFunc to make it a little easier on callers. Updates tailscale/corp#27058 Change-Id: Ia101a4a3005adb9118051b3416f5a64a4a45987d Signed-off-by: Will Norris --- tsweb/debug.go | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/tsweb/debug.go b/tsweb/debug.go index 9e6ce4df4..843324482 100644 --- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -34,6 +34,7 @@ type DebugHandler struct { kvs []func(io.Writer) // output one
  • ...
  • each, see KV() urls []string // one
  • ...
  • block with link each sections []func(io.Writer, *http.Request) // invoked in registration order prior to outputting + title string // title displayed on index page } // Debugger returns the DebugHandler registered on mux at /debug/, @@ -44,7 +45,8 @@ func Debugger(mux *http.ServeMux) *DebugHandler { return d } ret := &DebugHandler{ - mux: mux, + mux: mux, + title: fmt.Sprintf("%s debug", version.CmdName()), } mux.Handle("/debug/", ret) @@ -85,7 +87,7 @@ func (d *DebugHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { AddBrowserHeaders(w) f := func(format string, args ...any) { fmt.Fprintf(w, format, args...) } - f("

    %s debug

      ", version.CmdName()) + f("

      %s

        ", html.EscapeString(d.title)) for _, kv := range d.kvs { kv(w) } @@ -103,14 +105,20 @@ func (d *DebugHandler) handle(slug string, handler http.Handler) string { return href } -// Handle registers handler at /debug/ and creates a descriptive -// entry in /debug/ for it. +// Handle registers handler at /debug/ and adds a link to it +// on /debug/ with the provided description. func (d *DebugHandler) Handle(slug, desc string, handler http.Handler) { href := d.handle(slug, handler) d.URL(href, desc) } -// HandleSilent registers handler at /debug/. It does not create +// Handle registers handler at /debug/ and adds a link to it +// on /debug/ with the provided description. +func (d *DebugHandler) HandleFunc(slug, desc string, handler http.HandlerFunc) { + d.Handle(slug, desc, handler) +} + +// HandleSilent registers handler at /debug/. It does not add // a descriptive entry in /debug/ for it. This should be used // sparingly, for things that need to be registered but would pollute // the list of debug links. @@ -118,6 +126,14 @@ func (d *DebugHandler) HandleSilent(slug string, handler http.Handler) { d.handle(slug, handler) } +// HandleSilent registers handler at /debug/. It does not add +// a descriptive entry in /debug/ for it. This should be used +// sparingly, for things that need to be registered but would pollute +// the list of debug links. +func (d *DebugHandler) HandleSilentFunc(slug string, handler http.HandlerFunc) { + d.HandleSilent(slug, handler) +} + // KV adds a key/value list item to /debug/. func (d *DebugHandler) KV(k string, v any) { val := html.EscapeString(fmt.Sprintf("%v", v)) @@ -149,6 +165,11 @@ func (d *DebugHandler) Section(f func(w io.Writer, r *http.Request)) { d.sections = append(d.sections, f) } +// Title sets the title at the top of the debug page. 
+func (d *DebugHandler) Title(title string) { + d.title = title +} + func gcHandler(w http.ResponseWriter, r *http.Request) { w.Write([]byte("running GC...\n")) if f, ok := w.(http.Flusher); ok { From cd391b37a6b5bce82943dca32e9de05427c02a72 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Thu, 13 Mar 2025 14:14:03 +0000 Subject: [PATCH 68/87] ipn/ipnlocal, envknob: make it possible to configure the cert client to act in read-only mode (#15250) * ipn/ipnlocal,envknob: add some primitives for HA replica cert share. Add an envknob for configuring an instance's cert store as read-only, so that it does not attempt to issue or renew TLS credentials, only reads them from its cert store. This will be used by the Kubernetes Operator's HA Ingress to enable multiple replicas serving the same HTTPS endpoint to be able to share the same cert. Also some minor refactor to allow adding more tests for cert retrieval logic. Signed-off-by: Irbe Krumina --- envknob/envknob.go | 17 +++++ ipn/ipnlocal/cert.go | 37 ++++++++- ipn/ipnlocal/cert_test.go | 155 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 206 insertions(+), 3 deletions(-) diff --git a/envknob/envknob.go b/envknob/envknob.go index e74bfea71..2662da2b4 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -417,6 +417,23 @@ func App() string { return "" } +// IsCertShareReadOnlyMode returns true if this replica should never attempt to +// issue or renew TLS credentials for any of the HTTPS endpoints that it is +// serving. It should only return certs found in its cert store. Currently, +// this is used by the Kubernetes Operator's HA Ingress via VIPServices, where +// multiple Ingress proxy instances serve the same HTTPS endpoint with a shared +// TLS credentials. The TLS credentials should only be issued by one of the +// replicas. +// For HTTPS Ingress the operator and containerboot ensure +// that read-only replicas will not be serving the HTTPS endpoints before there +// is a shared cert available. 
+func IsCertShareReadOnlyMode() bool { + m := String("TS_CERT_SHARE_MODE") + return m == modeRO +} + +const modeRO = "ro" + // CrashOnUnexpected reports whether the Tailscale client should panic // on unexpected conditions. If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's // used. Otherwise the default value is true for unstable builds. diff --git a/ipn/ipnlocal/cert.go b/ipn/ipnlocal/cert.go index 4c026a9e7..111dc5a2d 100644 --- a/ipn/ipnlocal/cert.go +++ b/ipn/ipnlocal/cert.go @@ -119,6 +119,9 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string } if pair, err := getCertPEMCached(cs, domain, now); err == nil { + if envknob.IsCertShareReadOnlyMode() { + return pair, nil + } // If we got here, we have a valid unexpired cert. // Check whether we should start an async renewal. shouldRenew, err := b.shouldStartDomainRenewal(cs, domain, now, pair, minValidity) @@ -134,7 +137,7 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string if minValidity == 0 { logf("starting async renewal") // Start renewal in the background, return current valid cert. 
- go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, now, minValidity) + b.goTracker.Go(func() { getCertPEM(context.Background(), b, cs, logf, traceACME, domain, now, minValidity) }) return pair, nil } // If the caller requested a specific validity duration, fall through @@ -142,7 +145,11 @@ func (b *LocalBackend) GetCertPEMWithValidity(ctx context.Context, domain string logf("starting sync renewal") } - pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now, minValidity) + if envknob.IsCertShareReadOnlyMode() { + return nil, fmt.Errorf("retrieving cached TLS certificate failed and cert store is configured in read-only mode, not attempting to issue a new certificate: %w", err) + } + + pair, err := getCertPEM(ctx, b, cs, logf, traceACME, domain, now, minValidity) if err != nil { logf("getCertPEM: %v", err) return nil, err @@ -358,7 +365,29 @@ type certStateStore struct { testRoots *x509.CertPool } +// TLSCertKeyReader is an interface implemented by state stores where it makes +// sense to read the TLS cert and key in a single operation that can be +// distinguished from generic state value reads. Currently this is only implemented +// by the kubestore.Store, which, in some cases, need to read cert and key from a +// non-cached TLS Secret. 
+type TLSCertKeyReader interface { + ReadTLSCertAndKey(domain string) ([]byte, []byte, error) +} + func (s certStateStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) { + // If we're using a store that supports atomic reads, use that + if kr, ok := s.StateStore.(TLSCertKeyReader); ok { + cert, key, err := kr.ReadTLSCertAndKey(domain) + if err != nil { + return nil, err + } + if !validCertPEM(domain, key, cert, s.testRoots, now) { + return nil, errCertExpired + } + return &TLSCertKeyPair{CertPEM: cert, KeyPEM: key, Cached: true}, nil + } + + // Otherwise fall back to separate reads certPEM, err := s.ReadState(ipn.StateKey(domain + ".crt")) if err != nil { return nil, err @@ -446,7 +475,9 @@ func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKey return cs.Read(domain, now) } -func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { +// getCertPem checks if a cert needs to be renewed and if so, renews it. +// It can be overridden in tests. 
+var getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { acmeMu.Lock() defer acmeMu.Unlock() diff --git a/ipn/ipnlocal/cert_test.go b/ipn/ipnlocal/cert_test.go index c77570e87..e2398f670 100644 --- a/ipn/ipnlocal/cert_test.go +++ b/ipn/ipnlocal/cert_test.go @@ -6,6 +6,7 @@ package ipnlocal import ( + "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -14,11 +15,17 @@ import ( "embed" "encoding/pem" "math/big" + "os" + "path/filepath" "testing" "time" "github.com/google/go-cmp/cmp" + "tailscale.com/envknob" "tailscale.com/ipn/store/mem" + "tailscale.com/tstest" + "tailscale.com/types/logger" + "tailscale.com/util/must" ) func TestValidLookingCertDomain(t *testing.T) { @@ -221,3 +228,151 @@ func TestDebugACMEDirectoryURL(t *testing.T) { }) } } + +func TestGetCertPEMWithValidity(t *testing.T) { + const testDomain = "example.com" + b := &LocalBackend{ + store: &mem.Store{}, + varRoot: t.TempDir(), + ctx: context.Background(), + logf: t.Logf, + } + certDir, err := b.certDir() + if err != nil { + t.Fatalf("certDir error: %v", err) + } + if _, err := b.getCertStore(); err != nil { + t.Fatalf("getCertStore error: %v", err) + } + testRoot, err := certTestFS.ReadFile("testdata/rootCA.pem") + if err != nil { + t.Fatal(err) + } + roots := x509.NewCertPool() + if !roots.AppendCertsFromPEM(testRoot) { + t.Fatal("Unable to add test CA to the cert pool") + } + testX509Roots = roots + defer func() { testX509Roots = nil }() + tests := []struct { + name string + now time.Time + // storeCerts is true if the test cert and key should be written to store. 
+ storeCerts bool + readOnlyMode bool // TS_READ_ONLY_CERTS env var + wantAsyncRenewal bool // async issuance should be started + wantIssuance bool // sync issuance should be started + wantErr bool + }{ + { + name: "valid_no_renewal", + now: time.Date(2023, time.February, 20, 0, 0, 0, 0, time.UTC), + storeCerts: true, + wantAsyncRenewal: false, + wantIssuance: false, + wantErr: false, + }, + { + name: "issuance_needed", + now: time.Date(2023, time.February, 20, 0, 0, 0, 0, time.UTC), + storeCerts: false, + wantAsyncRenewal: false, + wantIssuance: true, + wantErr: false, + }, + { + name: "renewal_needed", + now: time.Date(2025, time.May, 1, 0, 0, 0, 0, time.UTC), + storeCerts: true, + wantAsyncRenewal: true, + wantIssuance: false, + wantErr: false, + }, + { + name: "renewal_needed_read_only_mode", + now: time.Date(2025, time.May, 1, 0, 0, 0, 0, time.UTC), + storeCerts: true, + readOnlyMode: true, + wantAsyncRenewal: false, + wantIssuance: false, + wantErr: false, + }, + { + name: "no_certs_read_only_mode", + now: time.Date(2025, time.May, 1, 0, 0, 0, 0, time.UTC), + storeCerts: false, + readOnlyMode: true, + wantAsyncRenewal: false, + wantIssuance: false, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + if tt.readOnlyMode { + envknob.Setenv("TS_CERT_SHARE_MODE", "ro") + } + + os.RemoveAll(certDir) + if tt.storeCerts { + os.MkdirAll(certDir, 0755) + if err := os.WriteFile(filepath.Join(certDir, "example.com.crt"), + must.Get(os.ReadFile("testdata/example.com.pem")), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(certDir, "example.com.key"), + must.Get(os.ReadFile("testdata/example.com-key.pem")), 0644); err != nil { + t.Fatal(err) + } + } + + b.clock = tstest.NewClock(tstest.ClockOpts{Start: tt.now}) + + allDone := make(chan bool, 1) + defer b.goTracker.AddDoneCallback(func() { + b.mu.Lock() + defer b.mu.Unlock() + if b.goTracker.RunningGoroutines() > 0 { + return + } + select { + case 
allDone <- true: + default: + } + })() + + // Set to true if get getCertPEM is called. GetCertPEM can be called in a goroutine for async + // renewal or in the main goroutine if issuance is required to obtain valid TLS credentials. + getCertPemWasCalled := false + getCertPEM = func(ctx context.Context, b *LocalBackend, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time, minValidity time.Duration) (*TLSCertKeyPair, error) { + getCertPemWasCalled = true + return nil, nil + } + prevGoRoutines := b.goTracker.StartedGoroutines() + _, err = b.GetCertPEMWithValidity(context.Background(), testDomain, 0) + if (err != nil) != tt.wantErr { + t.Errorf("b.GetCertPemWithValidity got err %v, wants error: '%v'", err, tt.wantErr) + } + // GetCertPEMWithValidity calls getCertPEM in a goroutine if async renewal is needed. That's the + // only goroutine it starts, so this can be used to test if async renewal was started. + gotAsyncRenewal := b.goTracker.StartedGoroutines()-prevGoRoutines != 0 + if gotAsyncRenewal { + select { + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for goroutines to finish") + case <-allDone: + } + } + // Verify that async renewal was triggered if expected. + if tt.wantAsyncRenewal != gotAsyncRenewal { + t.Fatalf("wants getCertPem to be called async: %v, got called %v", tt.wantAsyncRenewal, gotAsyncRenewal) + } + // Verify that (non-async) issuance was started if expected. 
+ gotIssuance := getCertPemWasCalled && !gotAsyncRenewal + if tt.wantIssuance != gotIssuance { + t.Errorf("wants getCertPem to be called: %v, got called %v", tt.wantIssuance, gotIssuance) + } + }) + } +} From eb680edbcea41342f0fb9659c2f6374c494b34d8 Mon Sep 17 00:00:00 2001 From: Paul Scott <408401+icio@users.noreply.github.com> Date: Thu, 13 Mar 2025 14:21:29 +0000 Subject: [PATCH 69/87] cmd/testwrapper: print failed tests preventing retry (#15270) Updates tailscale/corp#26637 Signed-off-by: Paul Scott --- cmd/testwrapper/testwrapper.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/testwrapper/testwrapper.go b/cmd/testwrapper/testwrapper.go index 1501c7e97..53c1b1d05 100644 --- a/cmd/testwrapper/testwrapper.go +++ b/cmd/testwrapper/testwrapper.go @@ -259,6 +259,7 @@ func main() { fmt.Printf("\n\nAttempt #%d: Retrying flaky tests:\n\nflakytest failures JSON: %s\n\n", thisRun.attempt, j) } + fatalFailures := make(map[string]struct{}) // pkg.Test key toRetry := make(map[string][]*testAttempt) // pkg -> tests to retry for _, pt := range thisRun.tests { ch := make(chan *testAttempt) @@ -301,11 +302,24 @@ func main() { if tr.isMarkedFlaky { toRetry[tr.pkg] = append(toRetry[tr.pkg], tr) } else { + fatalFailures[tr.pkg+"."+tr.testName] = struct{}{} failed = true } } if failed { fmt.Println("\n\nNot retrying flaky tests because non-flaky tests failed.") + + // Print the list of non-flakytest failures. + // We will later analyze the retried GitHub Action runs to see + // if non-flakytest failures succeeded upon retry. This will + // highlight tests which are flaky but not yet flagged as such. + if len(fatalFailures) > 0 { + tests := slicesx.MapKeys(fatalFailures) + sort.Strings(tests) + j, _ := json.Marshal(tests) + fmt.Printf("non-flakytest failures: %s\n", j) + } + fmt.Println() os.Exit(1) } From 06634125592abd2b9c5727ae3cc4116580dab33d Mon Sep 17 00:00:00 2001 From: "M. J. 
Fromberger" Date: Thu, 13 Mar 2025 08:06:20 -0700 Subject: [PATCH 70/87] util/eventbus: add basic throughput benchmarks (#15284) Shovel small events through the pipeine as fast as possible in a few basic configurations, to establish some baseline performance numbers. Updates #15160 Change-Id: I1dcbbd1109abb7b93aa4dcb70da57f183eb0e60e Signed-off-by: M. J. Fromberger --- util/eventbus/bench_test.go | 125 ++++++++++++++++++++++++++++++++++++ 1 file changed, 125 insertions(+) create mode 100644 util/eventbus/bench_test.go diff --git a/util/eventbus/bench_test.go b/util/eventbus/bench_test.go new file mode 100644 index 000000000..25f5b8002 --- /dev/null +++ b/util/eventbus/bench_test.go @@ -0,0 +1,125 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package eventbus_test + +import ( + "math/rand/v2" + "testing" + + "tailscale.com/util/eventbus" +) + +func BenchmarkBasicThroughput(b *testing.B) { + bus := eventbus.New() + pcli := bus.Client(b.Name() + "-pub") + scli := bus.Client(b.Name() + "-sub") + + type emptyEvent [0]byte + + // One publisher and a corresponding subscriber shoveling events as fast as + // they can through the plumbing. + pub := eventbus.Publish[emptyEvent](pcli) + sub := eventbus.Subscribe[emptyEvent](scli) + + go func() { + for { + select { + case <-sub.Events(): + continue + case <-sub.Done(): + return + } + } + }() + + for b.Loop() { + pub.Publish(emptyEvent{}) + } + bus.Close() +} + +func BenchmarkSubsThroughput(b *testing.B) { + bus := eventbus.New() + pcli := bus.Client(b.Name() + "-pub") + scli1 := bus.Client(b.Name() + "-sub1") + scli2 := bus.Client(b.Name() + "-sub2") + + type emptyEvent [0]byte + + // One publisher and two subscribers shoveling events as fast as they can + // through the plumbing. 
+ pub := eventbus.Publish[emptyEvent](pcli) + sub1 := eventbus.Subscribe[emptyEvent](scli1) + sub2 := eventbus.Subscribe[emptyEvent](scli2) + + for _, sub := range []*eventbus.Subscriber[emptyEvent]{sub1, sub2} { + go func() { + for { + select { + case <-sub.Events(): + continue + case <-sub.Done(): + return + } + } + }() + } + + for b.Loop() { + pub.Publish(emptyEvent{}) + } + bus.Close() +} + +func BenchmarkMultiThroughput(b *testing.B) { + bus := eventbus.New() + cli := bus.Client(b.Name()) + + type eventA struct{} + type eventB struct{} + + // Two disjoint event streams routed through the global order. + apub := eventbus.Publish[eventA](cli) + asub := eventbus.Subscribe[eventA](cli) + bpub := eventbus.Publish[eventB](cli) + bsub := eventbus.Subscribe[eventB](cli) + + go func() { + for { + select { + case <-asub.Events(): + continue + case <-asub.Done(): + return + } + } + }() + go func() { + for { + select { + case <-bsub.Events(): + continue + case <-bsub.Done(): + return + } + } + }() + + var rng uint64 + var bits int + for b.Loop() { + if bits == 0 { + rng = rand.Uint64() + bits = 64 + } + if rng&1 == 0 { + apub.Publish(eventA{}) + } else { + bpub.Publish(eventB{}) + } + rng >>= 1 + bits-- + } + bus.Close() +} From f0b395d851bbca03ad2712571898dbad0f9aad6a Mon Sep 17 00:00:00 2001 From: Patrick O'Doherty Date: Thu, 13 Mar 2025 10:37:42 -0700 Subject: [PATCH 71/87] go.mod update golang.org/x/net to 0.36.0 for govulncheck (#15296) Updates #cleanup Signed-off-by: Patrick O'Doherty --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 970e2e63c..a566c941f 100644 --- a/go.mod +++ b/go.mod @@ -97,7 +97,7 @@ require ( golang.org/x/crypto v0.35.0 golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac golang.org/x/mod v0.23.0 - golang.org/x/net v0.35.0 + golang.org/x/net v0.36.0 golang.org/x/oauth2 v0.26.0 golang.org/x/sync v0.11.0 golang.org/x/sys v0.30.0 diff --git a/go.sum b/go.sum index 
1707effd5..528e48c16 100644 --- a/go.sum +++ b/go.sum @@ -1135,8 +1135,8 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= -golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= From 8b1e7f646ee4730ad06c9b70c13e7861b964949b Mon Sep 17 00:00:00 2001 From: Jordan Whited Date: Thu, 13 Mar 2025 13:33:26 -0700 Subject: [PATCH 72/87] net/packet: implement Geneve header serialization (#15301) Updates tailscale/corp#27100 Signed-off-by: Jordan Whited --- net/packet/geneve.go | 104 ++++++++++++++++++++++++++++++++++++++ net/packet/geneve_test.go | 32 ++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 net/packet/geneve.go create mode 100644 net/packet/geneve_test.go diff --git a/net/packet/geneve.go b/net/packet/geneve.go new file mode 100644 index 000000000..29970a8fd --- /dev/null +++ b/net/packet/geneve.go @@ -0,0 +1,104 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package packet + +import ( + "encoding/binary" + "errors" + "io" +) + +const ( + // GeneveFixedHeaderLength is the length of the fixed size portion of the + // Geneve header, in bytes. 
+ GeneveFixedHeaderLength = 8 +) + +const ( + // GeneveProtocolDisco is the IEEE 802 Ethertype number used to represent + // the Tailscale Disco protocol in a Geneve header. + GeneveProtocolDisco uint16 = 0x7A11 + // GeneveProtocolWireGuard is the IEEE 802 Ethertype number used to represent the + // WireGuard protocol in a Geneve header. + GeneveProtocolWireGuard uint16 = 0x7A12 +) + +// GeneveHeader represents the fixed size Geneve header from RFC8926. +// TLVs/options are not implemented/supported. +// +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |Ver| Opt Len |O|C| Rsvd. | Protocol Type | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Virtual Network Identifier (VNI) | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type GeneveHeader struct { + // Ver (2 bits): The current version number is 0. Packets received by a + // tunnel endpoint with an unknown version MUST be dropped. Transit devices + // interpreting Geneve packets with an unknown version number MUST treat + // them as UDP packets with an unknown payload. + Version uint8 + + // Protocol Type (16 bits): The type of protocol data unit appearing after + // the Geneve header. This follows the Ethertype [ETYPES] convention, with + // Ethernet itself being represented by the value 0x6558. + Protocol uint16 + + // Virtual Network Identifier (VNI) (24 bits): An identifier for a unique + // element of a virtual network. In many situations, this may represent an + // L2 segment; however, the control plane defines the forwarding semantics + // of decapsulated packets. The VNI MAY be used as part of ECMP forwarding + // decisions or MAY be used as a mechanism to distinguish between + // overlapping address spaces contained in the encapsulated packet when load + // balancing across CPUs. + VNI uint32 + + // O (1 bit): Control packet. This packet contains a control message. 
+ // Control messages are sent between tunnel endpoints. Tunnel endpoints MUST + // NOT forward the payload, and transit devices MUST NOT attempt to + // interpret it. Since control messages are less frequent, it is RECOMMENDED + // that tunnel endpoints direct these packets to a high-priority control + // queue (for example, to direct the packet to a general purpose CPU from a + // forwarding Application-Specific Integrated Circuit (ASIC) or to separate + // out control traffic on a NIC). Transit devices MUST NOT alter forwarding + // behavior on the basis of this bit, such as ECMP link selection. + Control bool +} + +// Encode encodes GeneveHeader into b. If len(b) < GeneveFixedHeaderLength an +// io.ErrShortBuffer error is returned. +func (h *GeneveHeader) Encode(b []byte) error { + if len(b) < GeneveFixedHeaderLength { + return io.ErrShortBuffer + } + if h.Version > 3 { + return errors.New("version must be <= 3") + } + b[0] = 0 + b[1] = 0 + b[0] |= h.Version << 6 + if h.Control { + b[1] |= 0x80 + } + binary.BigEndian.PutUint16(b[2:], h.Protocol) + if h.VNI > 1<<24-1 { + return errors.New("VNI must be <= 2^24-1") + } + binary.BigEndian.PutUint32(b[4:], h.VNI<<8) + return nil +} + +// Decode decodes GeneveHeader from b. If len(b) < GeneveFixedHeaderLength an +// io.ErrShortBuffer error is returned. 
+func (h *GeneveHeader) Decode(b []byte) error { + if len(b) < GeneveFixedHeaderLength { + return io.ErrShortBuffer + } + h.Version = b[0] >> 6 + if b[1]&0x80 != 0 { + h.Control = true + } + h.Protocol = binary.BigEndian.Uint16(b[2:]) + h.VNI = binary.BigEndian.Uint32(b[4:]) >> 8 + return nil +} diff --git a/net/packet/geneve_test.go b/net/packet/geneve_test.go new file mode 100644 index 000000000..029638638 --- /dev/null +++ b/net/packet/geneve_test.go @@ -0,0 +1,32 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package packet + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestGeneveHeader(t *testing.T) { + in := GeneveHeader{ + Version: 3, + Protocol: GeneveProtocolDisco, + VNI: 1<<24 - 1, + Control: true, + } + b := make([]byte, GeneveFixedHeaderLength) + err := in.Encode(b) + if err != nil { + t.Fatal(err) + } + out := GeneveHeader{} + err = out.Decode(b) + if err != nil { + t.Fatal(err) + } + if diff := cmp.Diff(out, in); diff != "" { + t.Fatalf("wrong results (-got +want)\n%s", diff) + } +} From 299c5372bd2803bdbecbe7faf9e7112b55ef81d6 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Fri, 14 Mar 2025 17:33:08 +0000 Subject: [PATCH 73/87] cmd/containerboot: manage HA Ingress TLS certs from containerboot (#15303) cmd/containerboot: manage HA Ingress TLS certs from containerboot When ran as HA Ingress node, containerboot now can determine whether it should manage TLS certs for the HA Ingress replicas and call the LocalAPI cert endpoint to ensure initial issuance and renewal of the shared TLS certs. 
Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/containerboot/certs.go | 147 ++++++++++++++++++++ cmd/containerboot/certs_test.go | 229 ++++++++++++++++++++++++++++++++ cmd/containerboot/main.go | 2 +- cmd/containerboot/serve.go | 22 ++- cmd/containerboot/serve_test.go | 4 + cmd/containerboot/settings.go | 17 +++ cmd/containerboot/tailscaled.go | 3 + 7 files changed, 419 insertions(+), 5 deletions(-) create mode 100644 cmd/containerboot/certs.go create mode 100644 cmd/containerboot/certs_test.go diff --git a/cmd/containerboot/certs.go b/cmd/containerboot/certs.go new file mode 100644 index 000000000..7af0424a9 --- /dev/null +++ b/cmd/containerboot/certs.go @@ -0,0 +1,147 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "fmt" + "log" + "net" + "sync" + "time" + + "tailscale.com/ipn" + "tailscale.com/util/goroutines" + "tailscale.com/util/mak" +) + +// certManager is responsible for issuing certificates for known domains and for +// maintaining a loop that re-attempts issuance daily. +// Currently cert manager logic is only run on ingress ProxyGroup replicas that are responsible for managing certs for +// HA Ingress HTTPS endpoints ('write' replicas). +type certManager struct { + lc localClient + tracker goroutines.Tracker // tracks running goroutines + mu sync.Mutex // guards the following + // certLoops contains a map of DNS names, for which we currently need to + // manage certs to cancel functions that allow stopping a goroutine when + // we no longer need to manage certs for the DNS name. + certLoops map[string]context.CancelFunc +} + +// ensureCertLoops ensures that, for all currently managed Service HTTPS +// endpoints, there is a cert loop responsible for issuing and ensuring the +// renewal of the TLS certs. +// ServeConfig must not be nil. 
+func (cm *certManager) ensureCertLoops(ctx context.Context, sc *ipn.ServeConfig) error { + if sc == nil { + return fmt.Errorf("[unexpected] ensureCertLoops called with nil ServeConfig") + } + currentDomains := make(map[string]bool) + const httpsPort = "443" + for _, service := range sc.Services { + for hostPort := range service.Web { + domain, port, err := net.SplitHostPort(string(hostPort)) + if err != nil { + return fmt.Errorf("[unexpected] unable to parse HostPort %s", hostPort) + } + if port != httpsPort { // HA Ingress' HTTP endpoint + continue + } + currentDomains[domain] = true + } + } + cm.mu.Lock() + defer cm.mu.Unlock() + for domain := range currentDomains { + if _, exists := cm.certLoops[domain]; !exists { + cancelCtx, cancel := context.WithCancel(ctx) + mak.Set(&cm.certLoops, domain, cancel) + cm.tracker.Go(func() { cm.runCertLoop(cancelCtx, domain) }) + } + } + + // Stop goroutines for domain names that are no longer in the config. + for domain, cancel := range cm.certLoops { + if !currentDomains[domain] { + cancel() + delete(cm.certLoops, domain) + } + } + return nil +} + +// runCertLoop: +// - calls localAPI certificate endpoint to ensure that certs are issued for the +// given domain name +// - calls localAPI certificate endpoint daily to ensure that certs are renewed +// - if certificate issuance failed retries after an exponential backoff period +// starting at 1 minute and capped at 24 hours. Reset the backoff once issuance succeeds. +// Note that renewal check also happens when the node receives an HTTPS request and it is possible that certs get +// renewed at that point. Renewal here is needed to prevent the shared certs from expiry in edge cases where the 'write' +// replica does not get any HTTPS requests. 
+// https://letsencrypt.org/docs/integration-guide/#retrying-failures +func (cm *certManager) runCertLoop(ctx context.Context, domain string) { + const ( + normalInterval = 24 * time.Hour // regular renewal check + initialRetry = 1 * time.Minute // initial backoff after a failure + maxRetryInterval = 24 * time.Hour // max backoff period + ) + timer := time.NewTimer(0) // fire off timer immediately + defer timer.Stop() + retryCount := 0 + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + // We call the certificate endpoint, but don't do anything + // with the returned certs here. + // The call to the certificate endpoint will ensure that + // certs are issued/renewed as needed and stored in the + // relevant state store. For example, for HA Ingress + // 'write' replica, the cert and key will be stored in a + // Kubernetes Secret named after the domain for which we + // are issuing. + // Note that renewals triggered by the call to the + // certificates endpoint here and by renewal check + // triggered during a call to node's HTTPS endpoint + // share the same state/renewal lock mechanism, so we + // should not run into redundant issuances during + // concurrent renewal checks. + // TODO(irbekrm): maybe it is worth adding a new + // issuance endpoint that explicitly only triggers + // issuance and stores certs in the relevant store, but + // does not return certs to the caller? + _, _, err := cm.lc.CertPair(ctx, domain) + if err != nil { + log.Printf("error refreshing certificate for %s: %v", domain, err) + } + var nextInterval time.Duration + // TODO(irbekrm): distinguish between LE rate limit + // errors and other error types like transient network + // errors. 
+ if err == nil { + retryCount = 0 + nextInterval = normalInterval + } else { + retryCount++ + // Calculate backoff: initialRetry * 2^(retryCount-1) + // For retryCount=1: 1min * 2^0 = 1min + // For retryCount=2: 1min * 2^1 = 2min + // For retryCount=3: 1min * 2^2 = 4min + backoff := initialRetry * time.Duration(1<<(retryCount-1)) + if backoff > maxRetryInterval { + backoff = maxRetryInterval + } + nextInterval = backoff + log.Printf("Error refreshing certificate for %s (retry %d): %v. Will retry in %v\n", + domain, retryCount, err, nextInterval) + } + timer.Reset(nextInterval) + } + } +} diff --git a/cmd/containerboot/certs_test.go b/cmd/containerboot/certs_test.go new file mode 100644 index 000000000..577311ea3 --- /dev/null +++ b/cmd/containerboot/certs_test.go @@ -0,0 +1,229 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build linux + +package main + +import ( + "context" + "testing" + "time" + + "tailscale.com/ipn" + "tailscale.com/tailcfg" +) + +// TestEnsureCertLoops tests that the certManager correctly starts and stops +// update loops for certs when the serve config changes. It tracks goroutine +// count and uses that as a validator that the expected number of cert loops are +// running. 
+func TestEnsureCertLoops(t *testing.T) { + tests := []struct { + name string + initialConfig *ipn.ServeConfig + updatedConfig *ipn.ServeConfig + initialGoroutines int64 // after initial serve config is applied + updatedGoroutines int64 // after updated serve config is applied + wantErr bool + }{ + { + name: "empty_serve_config", + initialConfig: &ipn.ServeConfig{}, + initialGoroutines: 0, + }, + { + name: "nil_serve_config", + initialConfig: nil, + initialGoroutines: 0, + wantErr: true, + }, + { + name: "empty_to_one_service", + initialConfig: &ipn.ServeConfig{}, + updatedConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 0, + updatedGoroutines: 1, + }, + { + name: "single_service", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 1, + }, + { + name: "multiple_services", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + "svc:my-other-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-other-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 2, // one loop per domain across all services + }, + { + name: "ignore_non_https_ports", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + "my-app.tailnetxyz.ts.net:80": {}, + }, + }, + }, + }, + initialGoroutines: 1, // only one loop for the 443 endpoint + }, + { + name: "remove_domain", + initialConfig: &ipn.ServeConfig{ + Services: 
map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + "svc:my-other-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-other-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + updatedConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 2, // initially two loops (one per service) + updatedGoroutines: 1, // one loop after removing service2 + }, + { + name: "add_domain", + initialConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + updatedConfig: &ipn.ServeConfig{ + Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{ + "svc:my-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-app.tailnetxyz.ts.net:443": {}, + }, + }, + "svc:my-other-app": { + Web: map[ipn.HostPort]*ipn.WebServerConfig{ + "my-other-app.tailnetxyz.ts.net:443": {}, + }, + }, + }, + }, + initialGoroutines: 1, + updatedGoroutines: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cm := &certManager{ + lc: &fakeLocalClient{}, + certLoops: make(map[string]context.CancelFunc), + } + + allDone := make(chan bool, 1) + defer cm.tracker.AddDoneCallback(func() { + cm.mu.Lock() + defer cm.mu.Unlock() + if cm.tracker.RunningGoroutines() > 0 { + return + } + select { + case allDone <- true: + default: + } + })() + + err := cm.ensureCertLoops(ctx, tt.initialConfig) + if (err != nil) != tt.wantErr { + t.Fatalf("ensureCertLoops() error = %v", err) + } + + if got := cm.tracker.RunningGoroutines(); got != tt.initialGoroutines { + t.Errorf("after initial config: got %d 
running goroutines, want %d", got, tt.initialGoroutines) + } + + if tt.updatedConfig != nil { + if err := cm.ensureCertLoops(ctx, tt.updatedConfig); err != nil { + t.Fatalf("ensureCertLoops() error on update = %v", err) + } + + // Although starting goroutines and cancelling + // the context happens in the main goroutine, it + // the actual goroutine exit when a context is + // cancelled does not- so wait for a bit for the + // running goroutine count to reach the expected + // number. + deadline := time.After(5 * time.Second) + for { + if got := cm.tracker.RunningGoroutines(); got == tt.updatedGoroutines { + break + } + select { + case <-deadline: + t.Fatalf("timed out waiting for goroutine count to reach %d, currently at %d", + tt.updatedGoroutines, cm.tracker.RunningGoroutines()) + case <-time.After(10 * time.Millisecond): + continue + } + } + } + + if tt.updatedGoroutines == 0 { + return // no goroutines to wait for + } + // cancel context to make goroutines exit + cancel() + select { + case <-time.After(5 * time.Second): + t.Fatal("timed out waiting for goroutine to finish") + case <-allDone: + } + }) + } +} diff --git a/cmd/containerboot/main.go b/cmd/containerboot/main.go index cf4bd8620..5f8052bb9 100644 --- a/cmd/containerboot/main.go +++ b/cmd/containerboot/main.go @@ -646,7 +646,7 @@ runLoop: if cfg.ServeConfigPath != "" { triggerWatchServeConfigChanges.Do(func() { - go watchServeConfigChanges(ctx, cfg.ServeConfigPath, certDomainChanged, certDomain, client, kc) + go watchServeConfigChanges(ctx, certDomainChanged, certDomain, client, kc, cfg) }) } diff --git a/cmd/containerboot/serve.go b/cmd/containerboot/serve.go index 4ea5a9c46..37fd49777 100644 --- a/cmd/containerboot/serve.go +++ b/cmd/containerboot/serve.go @@ -28,10 +28,11 @@ import ( // applies it to lc. It exits when ctx is canceled. cdChanged is a channel that // is written to when the certDomain changes, causing the serve config to be // re-read and applied. 
-func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient) { +func watchServeConfigChanges(ctx context.Context, cdChanged <-chan bool, certDomainAtomic *atomic.Pointer[string], lc *local.Client, kc *kubeClient, cfg *settings) { if certDomainAtomic == nil { panic("certDomainAtomic must not be nil") } + var tickChan <-chan time.Time var eventChan <-chan fsnotify.Event if w, err := fsnotify.NewWatcher(); err != nil { @@ -43,7 +44,7 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan tickChan = ticker.C } else { defer w.Close() - if err := w.Add(filepath.Dir(path)); err != nil { + if err := w.Add(filepath.Dir(cfg.ServeConfigPath)); err != nil { log.Fatalf("serve proxy: failed to add fsnotify watch: %v", err) } eventChan = w.Events @@ -51,6 +52,12 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan var certDomain string var prevServeConfig *ipn.ServeConfig + var cm certManager + if cfg.CertShareMode == "rw" { + cm = certManager{ + lc: lc, + } + } for { select { case <-ctx.Done(): @@ -63,12 +70,12 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan // k8s handles these mounts. So just re-read the file and apply it // if it's changed. 
} - sc, err := readServeConfig(path, certDomain) + sc, err := readServeConfig(cfg.ServeConfigPath, certDomain) if err != nil { log.Fatalf("serve proxy: failed to read serve config: %v", err) } if sc == nil { - log.Printf("serve proxy: no serve config at %q, skipping", path) + log.Printf("serve proxy: no serve config at %q, skipping", cfg.ServeConfigPath) continue } if prevServeConfig != nil && reflect.DeepEqual(sc, prevServeConfig) { @@ -83,6 +90,12 @@ func watchServeConfigChanges(ctx context.Context, path string, cdChanged <-chan } } prevServeConfig = sc + if cfg.CertShareMode != "rw" { + continue + } + if err := cm.ensureCertLoops(ctx, sc); err != nil { + log.Fatalf("serve proxy: error ensuring cert loops: %v", err) + } } } @@ -96,6 +109,7 @@ func certDomainFromNetmap(nm *netmap.NetworkMap) string { // localClient is a subset of [local.Client] that can be mocked for testing. type localClient interface { SetServeConfig(context.Context, *ipn.ServeConfig) error + CertPair(context.Context, string) ([]byte, []byte, error) } func updateServeConfig(ctx context.Context, sc *ipn.ServeConfig, certDomain string, lc localClient) error { diff --git a/cmd/containerboot/serve_test.go b/cmd/containerboot/serve_test.go index eb92a8dc8..fc18f254d 100644 --- a/cmd/containerboot/serve_test.go +++ b/cmd/containerboot/serve_test.go @@ -206,6 +206,10 @@ func (m *fakeLocalClient) SetServeConfig(ctx context.Context, cfg *ipn.ServeConf return nil } +func (m *fakeLocalClient) CertPair(ctx context.Context, domain string) (certPEM, keyPEM []byte, err error) { + return nil, nil, nil +} + func TestHasHTTPSEndpoint(t *testing.T) { tests := []struct { name string diff --git a/cmd/containerboot/settings.go b/cmd/containerboot/settings.go index 0da18e52c..c62db5340 100644 --- a/cmd/containerboot/settings.go +++ b/cmd/containerboot/settings.go @@ -74,6 +74,12 @@ type settings struct { HealthCheckEnabled bool DebugAddrPort string EgressProxiesCfgPath string + // CertShareMode is set for Kubernetes 
Pods running cert share mode. + // Possible values are empty (containerboot doesn't run any certs + // logic), 'ro' (for Pods that shold never attempt to issue/renew + // certs) and 'rw' for Pods that should manage the TLS certs shared + // amongst the replicas. + CertShareMode string } func configFromEnv() (*settings, error) { @@ -128,6 +134,17 @@ func configFromEnv() (*settings, error) { cfg.PodIPv6 = parsed.String() } } + // If cert share is enabled, set the replica as read or write. Only 0th + // replica should be able to write. + isInCertShareMode := defaultBool("TS_EXPERIMENTAL_CERT_SHARE", false) + if isInCertShareMode { + cfg.CertShareMode = "ro" + podName := os.Getenv("POD_NAME") + if strings.HasSuffix(podName, "-0") { + cfg.CertShareMode = "rw" + } + } + if err := cfg.validate(); err != nil { return nil, fmt.Errorf("invalid configuration: %v", err) } diff --git a/cmd/containerboot/tailscaled.go b/cmd/containerboot/tailscaled.go index 01ee96d3a..654b34757 100644 --- a/cmd/containerboot/tailscaled.go +++ b/cmd/containerboot/tailscaled.go @@ -33,6 +33,9 @@ func startTailscaled(ctx context.Context, cfg *settings) (*local.Client, *os.Pro cmd.SysProcAttr = &syscall.SysProcAttr{ Setpgid: true, } + if cfg.CertShareMode != "" { + cmd.Env = append(os.Environ(), "TS_CERT_SHARE_MODE="+cfg.CertShareMode) + } log.Printf("Starting tailscaled") if err := cmd.Start(); err != nil { return nil, nil, fmt.Errorf("starting tailscaled failed: %v", err) From 3a4b62227654029384006b264ee21a9ab0e2d54b Mon Sep 17 00:00:00 2001 From: Andrew Lytvynov Date: Fri, 14 Mar 2025 12:30:29 -0700 Subject: [PATCH 74/87] .github/workflows/govulncheck.yml: send messages to another channel (#15295) Updates #cleanup Signed-off-by: Andrew Lytvynov --- .github/workflows/govulncheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 47d278e1c..10269ff0b 100644 --- a/.github/workflows/govulncheck.yml +++ 
b/.github/workflows/govulncheck.yml @@ -30,7 +30,7 @@ jobs: token: ${{ secrets.GOVULNCHECK_BOT_TOKEN }} payload: | { - "channel": "C05PXRM304B", + "channel": "C08FGKZCQTW", "blocks": [ { "type": "section", From 27ef9b666cd23c2ad5acb27c4f87294228219305 Mon Sep 17 00:00:00 2001 From: James Sanderson Date: Fri, 7 Mar 2025 15:07:00 +0000 Subject: [PATCH 75/87] ipn/ipnlocal: add test for CapMap packet filters Updates tailscale/corp#20514 Signed-off-by: James Sanderson --- ipn/ipnlocal/local_test.go | 59 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) diff --git a/ipn/ipnlocal/local_test.go b/ipn/ipnlocal/local_test.go index 35977e679..aa9137275 100644 --- a/ipn/ipnlocal/local_test.go +++ b/ipn/ipnlocal/local_test.go @@ -44,6 +44,7 @@ import ( "tailscale.com/tsd" "tailscale.com/tstest" "tailscale.com/types/dnstype" + "tailscale.com/types/ipproto" "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/types/logid" @@ -60,6 +61,7 @@ import ( "tailscale.com/util/syspolicy/source" "tailscale.com/wgengine" "tailscale.com/wgengine/filter" + "tailscale.com/wgengine/filter/filtertype" "tailscale.com/wgengine/wgcfg" ) @@ -5206,3 +5208,60 @@ func TestUpdateIngressLocked(t *testing.T) { }) } } + +// TestSrcCapPacketFilter tests that LocalBackend handles packet filters with +// SrcCaps instead of Srcs (IPs) +func TestSrcCapPacketFilter(t *testing.T) { + lb := newLocalBackendWithTestControl(t, false, func(tb testing.TB, opts controlclient.Options) controlclient.Client { + return newClient(tb, opts) + }) + if err := lb.Start(ipn.Options{}); err != nil { + t.Fatalf("(*LocalBackend).Start(): %v", err) + } + + var k key.NodePublic + must.Do(k.UnmarshalText([]byte("nodekey:5c8f86d5fc70d924e55f02446165a5dae8f822994ad26bcf4b08fd841f9bf261"))) + + controlClient := lb.cc.(*mockControl) + controlClient.send(nil, "", false, &netmap.NetworkMap{ + SelfNode: (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}, + }).View(), + 
Peers: []tailcfg.NodeView{ + (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("2.2.2.2/32")}, + ID: 2, + Key: k, + CapMap: tailcfg.NodeCapMap{"cap-X": nil}, // node 2 has cap + }).View(), + (&tailcfg.Node{ + Addresses: []netip.Prefix{netip.MustParsePrefix("3.3.3.3/32")}, + ID: 3, + Key: k, + CapMap: tailcfg.NodeCapMap{}, // node 3 does not have the cap + }).View(), + }, + PacketFilter: []filtertype.Match{{ + IPProto: views.SliceOf([]ipproto.Proto{ipproto.TCP}), + SrcCaps: []tailcfg.NodeCapability{"cap-X"}, // cap in packet filter rule + Dsts: []filtertype.NetPortRange{{ + Net: netip.MustParsePrefix("1.1.1.1/32"), + Ports: filtertype.PortRange{ + First: 22, + Last: 22, + }, + }}, + }}, + }) + + f := lb.GetFilterForTest() + res := f.Check(netip.MustParseAddr("2.2.2.2"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) + if res != filter.Accept { + t.Errorf("Check(2.2.2.2, ...) = %s, want %s", res, filter.Accept) + } + + res = f.Check(netip.MustParseAddr("3.3.3.3"), netip.MustParseAddr("1.1.1.1"), 22, ipproto.TCP) + if !res.IsDrop() { + t.Error("IsDrop() for node without cap = false, want true") + } +} From 25b059c0eec0ed8475239c640ceddf4a1bd17e98 Mon Sep 17 00:00:00 2001 From: License Updater Date: Mon, 17 Mar 2025 15:02:33 +0000 Subject: [PATCH 76/87] licenses: update license notices Signed-off-by: License Updater --- licenses/android.md | 6 +++--- licenses/apple.md | 2 +- licenses/tailscale.md | 2 +- licenses/windows.md | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/licenses/android.md b/licenses/android.md index c3e9e989a..37961b74c 100644 --- a/licenses/android.md +++ b/licenses/android.md @@ -29,7 +29,7 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [github.com/djherbis/times](https://pkg.go.dev/github.com/djherbis/times) ([MIT](https://github.com/djherbis/times/blob/v1.6.0/LICENSE)) - [github.com/fxamacker/cbor/v2](https://pkg.go.dev/github.com/fxamacker/cbor/v2) ([MIT](https://github.com/fxamacker/cbor/blob/v2.7.0/LICENSE)) - [github.com/gaissmai/bart](https://pkg.go.dev/github.com/gaissmai/bart) ([MIT](https://github.com/gaissmai/bart/blob/v0.18.0/LICENSE)) - - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/6a9a0fde9288/LICENSE)) + - [github.com/go-json-experiment/json](https://pkg.go.dev/github.com/go-json-experiment/json) ([BSD-3-Clause](https://github.com/go-json-experiment/json/blob/d3c622f1b874/LICENSE)) - [github.com/godbus/dbus/v5](https://pkg.go.dev/github.com/godbus/dbus/v5) ([BSD-2-Clause](https://github.com/godbus/dbus/blob/76236955d466/LICENSE)) - [github.com/golang/groupcache/lru](https://pkg.go.dev/github.com/golang/groupcache/lru) ([Apache-2.0](https://github.com/golang/groupcache/blob/41bb18bfe9da/LICENSE)) - [github.com/google/btree](https://pkg.go.dev/github.com/google/btree) ([Apache-2.0](https://github.com/google/btree/blob/v1.1.2/LICENSE)) @@ -64,11 +64,11 @@ Client][]. See also the dependencies in the [Tailscale CLI][]. 
- [go4.org/mem](https://pkg.go.dev/go4.org/mem) ([Apache-2.0](https://github.com/go4org/mem/blob/ae6ca9944745/LICENSE)) - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [go4.org/unsafe/assume-no-moving-gc](https://pkg.go.dev/go4.org/unsafe/assume-no-moving-gc) ([BSD-3-Clause](https://github.com/go4org/unsafe-assume-no-moving-gc/blob/e7c30c78aeb2/LICENSE)) - - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.33.0:LICENSE)) + - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/mobile](https://pkg.go.dev/golang.org/x/mobile) ([BSD-3-Clause](https://cs.opensource.google/go/x/mobile/+/81131f64:LICENSE)) - [golang.org/x/mod/semver](https://pkg.go.dev/golang.org/x/mod/semver) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) diff --git a/licenses/apple.md b/licenses/apple.md index a2984ea2e..814df22da 100644 --- a/licenses/apple.md +++ b/licenses/apple.md @@ -70,7 +70,7 @@ See also the dependencies in 
the [Tailscale CLI][]. - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) diff --git a/licenses/tailscale.md b/licenses/tailscale.md index 777687be6..b3095f5b4 100644 --- a/licenses/tailscale.md +++ b/licenses/tailscale.md @@ -92,7 +92,7 @@ Some packages may only be included on certain architectures or operating systems - [go4.org/netipx](https://pkg.go.dev/go4.org/netipx) ([BSD-3-Clause](https://github.com/go4org/netipx/blob/fdeea329fbba/LICENSE)) - [golang.org/x/crypto](https://pkg.go.dev/golang.org/x/crypto) ([BSD-3-Clause](https://cs.opensource.google/go/x/crypto/+/v0.35.0:LICENSE)) - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) 
([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) ([BSD-3-Clause](https://cs.opensource.google/go/x/oauth2/+/v0.26.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) diff --git a/licenses/windows.md b/licenses/windows.md index 78fdcf7fb..bdf965051 100644 --- a/licenses/windows.md +++ b/licenses/windows.md @@ -62,7 +62,7 @@ Windows][]. See also the dependencies in the [Tailscale CLI][]. - [github.com/tailscale/go-winio](https://pkg.go.dev/github.com/tailscale/go-winio) ([MIT](https://github.com/tailscale/go-winio/blob/c4f33415bf55/LICENSE)) - [github.com/tailscale/hujson](https://pkg.go.dev/github.com/tailscale/hujson) ([BSD-3-Clause](https://github.com/tailscale/hujson/blob/20486734a56a/LICENSE)) - [github.com/tailscale/netlink](https://pkg.go.dev/github.com/tailscale/netlink) ([Apache-2.0](https://github.com/tailscale/netlink/blob/4d49adab4de7/LICENSE)) - - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/04068c1cab63/LICENSE)) + - [github.com/tailscale/walk](https://pkg.go.dev/github.com/tailscale/walk) ([BSD-3-Clause](https://github.com/tailscale/walk/blob/b2c15a420186/LICENSE)) - [github.com/tailscale/win](https://pkg.go.dev/github.com/tailscale/win) ([BSD-3-Clause](https://github.com/tailscale/win/blob/5992cb43ca35/LICENSE)) - [github.com/tailscale/xnet/webdav](https://pkg.go.dev/github.com/tailscale/xnet/webdav) ([BSD-3-Clause](https://github.com/tailscale/xnet/blob/8497ac4dab2e/LICENSE)) - [github.com/tc-hib/winres](https://pkg.go.dev/github.com/tc-hib/winres) ([0BSD](https://github.com/tc-hib/winres/blob/v0.2.1/LICENSE)) @@ -74,7 +74,7 @@ Windows][]. 
See also the dependencies in the [Tailscale CLI][]. - [golang.org/x/exp](https://pkg.go.dev/golang.org/x/exp) ([BSD-3-Clause](https://cs.opensource.google/go/x/exp/+/939b2ce7:LICENSE)) - [golang.org/x/image/bmp](https://pkg.go.dev/golang.org/x/image/bmp) ([BSD-3-Clause](https://cs.opensource.google/go/x/image/+/v0.24.0:LICENSE)) - [golang.org/x/mod](https://pkg.go.dev/golang.org/x/mod) ([BSD-3-Clause](https://cs.opensource.google/go/x/mod/+/v0.23.0:LICENSE)) - - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.35.0:LICENSE)) + - [golang.org/x/net](https://pkg.go.dev/golang.org/x/net) ([BSD-3-Clause](https://cs.opensource.google/go/x/net/+/v0.36.0:LICENSE)) - [golang.org/x/sync](https://pkg.go.dev/golang.org/x/sync) ([BSD-3-Clause](https://cs.opensource.google/go/x/sync/+/v0.11.0:LICENSE)) - [golang.org/x/sys](https://pkg.go.dev/golang.org/x/sys) ([BSD-3-Clause](https://cs.opensource.google/go/x/sys/+/v0.30.0:LICENSE)) - [golang.org/x/term](https://pkg.go.dev/golang.org/x/term) ([BSD-3-Clause](https://cs.opensource.google/go/x/term/+/v0.29.0:LICENSE)) From b413b70ae27686746e461b0e51670d4ac5d3c987 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Sun, 9 Mar 2025 16:55:51 -0700 Subject: [PATCH 77/87] cmd/proxy-to-grafana: support setting Grafana role via grants This adds support for using ACL Grants to configure a role for the auto-provisioned user. 
Fixes tailscale/corp#14567 Signed-off-by: Anton Tolchanov --- cmd/proxy-to-grafana/proxy-to-grafana.go | 104 +++++++++++++++++++++-- 1 file changed, 97 insertions(+), 7 deletions(-) diff --git a/cmd/proxy-to-grafana/proxy-to-grafana.go b/cmd/proxy-to-grafana/proxy-to-grafana.go index 849d184c6..bdabd650f 100644 --- a/cmd/proxy-to-grafana/proxy-to-grafana.go +++ b/cmd/proxy-to-grafana/proxy-to-grafana.go @@ -19,8 +19,25 @@ // header_property = username // auto_sign_up = true // whitelist = 127.0.0.1 -// headers = Name:X-WEBAUTH-NAME +// headers = Email:X-Webauth-User, Name:X-Webauth-Name, Role:X-Webauth-Role // enable_login_token = true +// +// You can use grants in Tailscale ACL to give users different roles in Grafana. +// For example, to give group:eng the Editor role, add the following to your ACLs: +// +// "grants": [ +// { +// "src": ["group:eng"], +// "dst": ["tag:grafana"], +// "app": { +// "tailscale.com/cap/proxy-to-grafana": [{ +// "role": "editor", +// }], +// }, +// }, +// ], +// +// If multiple roles are specified, the most permissive role is used. package main import ( @@ -49,6 +66,57 @@ var ( loginServer = flag.String("login-server", "", "URL to alternative control server. If empty, the default Tailscale control is used.") ) +// aclCap is the Tailscale ACL capability used to configure proxy-to-grafana. +const aclCap tailcfg.PeerCapability = "tailscale.com/cap/proxy-to-grafana" + +// aclGrant is an access control rule that assigns Grafana permissions +// while provisioning a user. +type aclGrant struct { + // Role is one of: "viewer", "editor", "admin". + Role string `json:"role"` +} + +// grafanaRole defines possible Grafana roles. +type grafanaRole int + +const ( + // Roles are ordered by their permissions, with the least permissive role first. + // If a user has multiple roles, the most permissive role is used. + ViewerRole grafanaRole = iota + EditorRole + AdminRole +) + +// String returns the string representation of a grafanaRole. 
+// It is used as a header value in the HTTP request to Grafana. +func (r grafanaRole) String() string { + switch r { + case ViewerRole: + return "Viewer" + case EditorRole: + return "Editor" + case AdminRole: + return "Admin" + default: + // A safe default. + return "Viewer" + } +} + +// roleFromString converts a string to a grafanaRole. +// It is used to parse the role from the ACL grant. +func roleFromString(s string) (grafanaRole, error) { + switch strings.ToLower(s) { + case "viewer": + return ViewerRole, nil + case "editor": + return EditorRole, nil + case "admin": + return AdminRole, nil + } + return ViewerRole, fmt.Errorf("unknown role: %q", s) +} + func main() { flag.Parse() if *hostname == "" || strings.Contains(*hostname, ".") { @@ -134,7 +202,15 @@ func modifyRequest(req *http.Request, localClient *local.Client) { return } - user, err := getTailscaleUser(req.Context(), localClient, req.RemoteAddr) + // Delete any existing X-Webauth-* headers to prevent possible spoofing + // if getting Tailnet identity fails. 
+ for h := range req.Header { + if strings.HasPrefix(h, "X-Webauth-") { + req.Header.Del(h) + } + } + + user, role, err := getTailscaleIdentity(req.Context(), localClient, req.RemoteAddr) if err != nil { log.Printf("error getting Tailscale user: %v", err) return @@ -142,19 +218,33 @@ func modifyRequest(req *http.Request, localClient *local.Client) { req.Header.Set("X-Webauth-User", user.LoginName) req.Header.Set("X-Webauth-Name", user.DisplayName) + req.Header.Set("X-Webauth-Role", role.String()) } -func getTailscaleUser(ctx context.Context, localClient *local.Client, ipPort string) (*tailcfg.UserProfile, error) { +func getTailscaleIdentity(ctx context.Context, localClient *local.Client, ipPort string) (*tailcfg.UserProfile, grafanaRole, error) { whois, err := localClient.WhoIs(ctx, ipPort) if err != nil { - return nil, fmt.Errorf("failed to identify remote host: %w", err) + return nil, ViewerRole, fmt.Errorf("failed to identify remote host: %w", err) } if whois.Node.IsTagged() { - return nil, fmt.Errorf("tagged nodes are not users") + return nil, ViewerRole, fmt.Errorf("tagged nodes are not users") } if whois.UserProfile == nil || whois.UserProfile.LoginName == "" { - return nil, fmt.Errorf("failed to identify remote user") + return nil, ViewerRole, fmt.Errorf("failed to identify remote user") } - return whois.UserProfile, nil + role := ViewerRole + grants, err := tailcfg.UnmarshalCapJSON[aclGrant](whois.CapMap, aclCap) + if err != nil { + return nil, ViewerRole, fmt.Errorf("failed to unmarshal ACL grants: %w", err) + } + for _, g := range grants { + r, err := roleFromString(g.Role) + if err != nil { + return nil, ViewerRole, fmt.Errorf("failed to parse role: %w", err) + } + role = max(role, r) + } + + return whois.UserProfile, role, nil } From ef1e14250c40b28c68691f88dc1b6d1cc33425c0 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Tue, 18 Mar 2025 05:48:59 -0700 Subject: [PATCH 78/87] cmd/k8s-operator: ensure old VIPServices are cleaned up (#15344) When the 
Ingress is updated to a new hostname, the controller does not currently clean up the old VIPService from control. Fix this up to parse the ownership comment correctly and write a test to enforce the improved behaviour Updates tailscale/corp#24795 Change-Id: I792ae7684807d254bf2d3cc7aa54aa04a582d1f5 Signed-off-by: Tom Proctor --- cmd/k8s-operator/ingress-for-pg.go | 20 ++------- cmd/k8s-operator/ingress-for-pg_test.go | 57 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 17 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index 85a64a336..cdbfecb35 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -402,16 +402,9 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG logger.Infof("VIPService %q is not owned by any Ingress, cleaning up", vipServiceName) // Delete the VIPService from control if necessary. - svc, _ := r.tsClient.GetVIPService(ctx, vipServiceName) - if svc != nil && isVIPServiceForAnyIngress(svc) { - logger.Infof("cleaning up orphaned VIPService %q", vipServiceName) - svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) - if err != nil { - errResp := &tailscale.ErrResponse{} - if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { - return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) - } - } + svcsChanged, err = r.cleanupVIPService(ctx, vipServiceName, logger) + if err != nil { + return false, fmt.Errorf("deleting VIPService %q: %w", vipServiceName, err) } // Make sure the VIPService is not advertised in tailscaled or serve config. 
@@ -570,13 +563,6 @@ func (r *HAIngressReconciler) shouldExpose(ing *networkingv1.Ingress) bool { return isTSIngress && pgAnnot != "" } -func isVIPServiceForAnyIngress(svc *tailscale.VIPService) bool { - if svc == nil { - return false - } - return strings.HasPrefix(svc.Comment, "tailscale.com/k8s-operator:owned-by:") -} - // validateIngress validates that the Ingress is properly configured. // Currently validates: // - Any tags provided via tailscale.com/tags annotation are valid Tailscale ACL tags diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 7a995e169..2f675337e 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -8,8 +8,10 @@ package main import ( "context" "encoding/json" + "errors" "fmt" "maps" + "net/http" "reflect" "testing" @@ -186,6 +188,61 @@ func TestIngressPGReconciler(t *testing.T) { verifyTailscaledConfig(t, fc, nil) } +func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { + ingPGR, fc, ft := setupIngressTest(t) + + ing := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{Kind: "Ingress", APIVersion: "networking.k8s.io/v1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ingress", + Namespace: "default", + UID: types.UID("1234-UID"), + Annotations: map[string]string{ + "tailscale.com/proxy-group": "test-pg", + }, + }, + Spec: networkingv1.IngressSpec{ + IngressClassName: ptr.To("tailscale"), + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test", + Port: networkingv1.ServiceBackendPort{ + Number: 8080, + }, + }, + }, + TLS: []networkingv1.IngressTLS{ + {Hosts: []string{"my-svc.tailnetxyz.ts.net"}}, + }, + }, + } + mustCreate(t, fc, ing) + + // Verify initial reconciliation + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyServeConfig(t, fc, "svc:my-svc", false) + verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, 
[]string{"svc:my-svc"}) + + // Update the Ingress hostname and make sure the original VIPService is deleted. + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { + ing.Spec.TLS[0].Hosts[0] = "updated-svc.tailnetxyz.ts.net" + }) + expectReconciled(t, ingPGR, "default", "test-ingress") + verifyServeConfig(t, fc, "svc:updated-svc", false) + verifyVIPService(t, ft, "svc:updated-svc", []string{"443"}) + verifyTailscaledConfig(t, fc, []string{"svc:updated-svc"}) + + _, err := ft.GetVIPService(context.Background(), tailcfg.ServiceName("svc:my-svc")) + if err == nil { + t.Fatalf("svc:my-svc not cleaned up") + } + var errResp *tailscale.ErrResponse + if !errors.As(err, &errResp) || errResp.Status != http.StatusNotFound { + t.Fatalf("unexpected error: %v", err) + } +} + func TestValidateIngress(t *testing.T) { baseIngress := &networkingv1.Ingress{ ObjectMeta: metav1.ObjectMeta{ From 34734ba6351b76eaef525623ab6d17fd38f9b3d6 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Tue, 18 Mar 2025 15:09:22 +0000 Subject: [PATCH 79/87] ipn/store/kubestore,kube,envknob,cmd/tailscaled/depaware.txt: allow kubestore read/write custom TLS secrets (#15307) This PR adds some custom logic for reading and writing kube store values that are TLS certs and keys: 1) when store is initialized, lookup additional TLS Secrets for this node and if found, load TLS certs from there 2) if the node runs in certs 'read only' mode and TLS cert and key are not found in the in-memory store, look those up in a Secret 3) if the node runs in certs 'read only' mode, run a daily TLS certs reload to memory to get any renewed certs Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- cmd/tailscaled/depaware.txt | 2 +- envknob/envknob.go | 10 +- ipn/store/kubestore/store_kube.go | 272 +++++++++-- ipn/store/kubestore/store_kube_test.go | 634 +++++++++++++++++++++++-- kube/kubeapi/api.go | 8 + kube/kubeclient/client.go | 37 +- kube/kubeclient/fake_client.go | 7 + 
kube/kubetypes/types.go | 3 + 8 files changed, 881 insertions(+), 92 deletions(-) diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index 026758a47..b47f43c76 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -286,7 +286,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de tailscale.com/ipn/store/mem from tailscale.com/ipn/ipnlocal+ L tailscale.com/kube/kubeapi from tailscale.com/ipn/store/kubestore+ L tailscale.com/kube/kubeclient from tailscale.com/ipn/store/kubestore - tailscale.com/kube/kubetypes from tailscale.com/envknob + tailscale.com/kube/kubetypes from tailscale.com/envknob+ tailscale.com/licenses from tailscale.com/client/web tailscale.com/log/filelogger from tailscale.com/logpolicy tailscale.com/log/sockstatlog from tailscale.com/ipn/ipnlocal diff --git a/envknob/envknob.go b/envknob/envknob.go index 2662da2b4..e581eb27e 100644 --- a/envknob/envknob.go +++ b/envknob/envknob.go @@ -429,10 +429,16 @@ func App() string { // is a shared cert available. func IsCertShareReadOnlyMode() bool { m := String("TS_CERT_SHARE_MODE") - return m == modeRO + return m == "ro" } -const modeRO = "ro" +// IsCertShareReadWriteMode returns true if this instance is the replica +// responsible for issuing and renewing TLS certs in an HA setup with certs +// shared between multiple replicas. +func IsCertShareReadWriteMode() bool { + m := String("TS_CERT_SHARE_MODE") + return m == "rw" +} // CrashOnUnexpected reports whether the Tailscale client should panic // on unexpected conditions. 
If TS_DEBUG_CRASH_ON_UNEXPECTED is set, that's diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index ecd101c57..79e66d357 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -13,11 +13,14 @@ import ( "strings" "time" + "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" + "tailscale.com/kube/kubetypes" "tailscale.com/types/logger" + "tailscale.com/util/dnsname" "tailscale.com/util/mak" ) @@ -32,21 +35,37 @@ const ( reasonTailscaleStateLoadFailed = "TailscaleStateLoadFailed" eventTypeWarning = "Warning" eventTypeNormal = "Normal" + + keyTLSCert = "tls.crt" + keyTLSKey = "tls.key" ) // Store is an ipn.StateStore that uses a Kubernetes Secret for persistence. type Store struct { - client kubeclient.Client - canPatch bool - secretName string + client kubeclient.Client + canPatch bool + secretName string // state Secret + certShareMode string // 'ro', 'rw', or empty + podName string - // memory holds the latest tailscale state. Writes write state to a kube Secret and memory, Reads read from - // memory. + // memory holds the latest tailscale state. Writes write state to a kube + // Secret and memory, Reads read from memory. memory mem.Store } -// New returns a new Store that persists to the named Secret. -func New(_ logger.Logf, secretName string) (*Store, error) { +// New returns a new Store that persists state to Kubernets Secret(s). +// Tailscale state is stored in a Secret named by the secretName parameter. +// TLS certs are stored and retrieved from state Secret or separate Secrets +// named after TLS endpoints if running in cert share mode. 
+func New(logf logger.Logf, secretName string) (*Store, error) { + c, err := newClient() + if err != nil { + return nil, err + } + return newWithClient(logf, c, secretName) +} + +func newClient() (kubeclient.Client, error) { c, err := kubeclient.New("tailscale-state-store") if err != nil { return nil, err @@ -55,6 +74,10 @@ func New(_ logger.Logf, secretName string) (*Store, error) { // Derive the API server address from the environment variables c.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS"))) } + return c, nil +} + +func newWithClient(logf logger.Logf, c kubeclient.Client, secretName string) (*Store, error) { canPatch, _, err := c.CheckSecretPermissions(context.Background(), secretName) if err != nil { return nil, err @@ -63,11 +86,30 @@ func New(_ logger.Logf, secretName string) (*Store, error) { client: c, canPatch: canPatch, secretName: secretName, + podName: os.Getenv("POD_NAME"), } + if envknob.IsCertShareReadWriteMode() { + s.certShareMode = "rw" + } else if envknob.IsCertShareReadOnlyMode() { + s.certShareMode = "ro" + } + // Load latest state from kube Secret if it already exists. if err := s.loadState(); err != nil && err != ipn.ErrStateNotExist { return nil, fmt.Errorf("error loading state from kube Secret: %w", err) } + // If we are in cert share mode, pre-load existing shared certs. + if s.certShareMode == "rw" || s.certShareMode == "ro" { + sel := s.certSecretSelector() + if err := s.loadCerts(context.Background(), sel); err != nil { + // We will attempt to again retrieve the certs from Secrets when a request for an HTTPS endpoint + // is received. + log.Printf("[unexpected] error loading TLS certs: %v", err) + } + } + if s.certShareMode == "ro" { + go s.runCertReload(context.Background(), logf) + } return s, nil } @@ -84,27 +126,101 @@ func (s *Store) ReadState(id ipn.StateKey) ([]byte, error) { // WriteState implements the StateStore interface. 
func (s *Store) WriteState(id ipn.StateKey, bs []byte) (err error) { - return s.updateStateSecret(map[string][]byte{string(id): bs}) -} - -// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields of a Tailscale Kubernetes node's state -// Secret. -func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) error { - return s.updateStateSecret(map[string][]byte{domain + ".crt": cert, domain + ".key": key}) -} - -func (s *Store) updateStateSecret(data map[string][]byte) (err error) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) defer func() { if err == nil { - for id, bs := range data { - // The in-memory store does not distinguish between values read from state Secret on - // init and values written to afterwards. Values read from the state - // Secret will always be sanitized, so we also need to sanitize values written to store - // later, so that the Read logic can just lookup keys in sanitized form. - s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) - } + s.memory.WriteState(ipn.StateKey(sanitizeKey(id)), bs) } + }() + return s.updateSecret(map[string][]byte{string(id): bs}, s.secretName) +} + +// WriteTLSCertAndKey writes a TLS cert and key to domain.crt, domain.key fields +// of a Tailscale Kubernetes node's state Secret. +func (s *Store) WriteTLSCertAndKey(domain string, cert, key []byte) (err error) { + if s.certShareMode == "ro" { + log.Printf("[unexpected] TLS cert and key write in read-only mode") + } + if err := dnsname.ValidHostname(domain); err != nil { + return fmt.Errorf("invalid domain name %q: %w", domain, err) + } + defer func() { + // TODO(irbekrm): a read between these two separate writes would + // get a mismatched cert and key. Allow writing both cert and + // key to the memory store in a single, lock-protected operation. 
+ if err == nil { + s.memory.WriteState(ipn.StateKey(domain+".crt"), cert) + s.memory.WriteState(ipn.StateKey(domain+".key"), key) + } + }() + secretName := s.secretName + data := map[string][]byte{ + domain + ".crt": cert, + domain + ".key": key, + } + // If we run in cert share mode, cert and key for a DNS name are written + // to a separate Secret. + if s.certShareMode == "rw" { + secretName = domain + data = map[string][]byte{ + keyTLSCert: cert, + keyTLSKey: key, + } + } + return s.updateSecret(data, secretName) +} + +// ReadTLSCertAndKey reads a TLS cert and key from memory or from a +// domain-specific Secret. It first checks the in-memory store, if not found in +// memory and running cert store in read-only mode, looks up a Secret. +func (s *Store) ReadTLSCertAndKey(domain string) (cert, key []byte, err error) { + if err := dnsname.ValidHostname(domain); err != nil { + return nil, nil, fmt.Errorf("invalid domain name %q: %w", domain, err) + } + certKey := domain + ".crt" + keyKey := domain + ".key" + + cert, err = s.memory.ReadState(ipn.StateKey(certKey)) + if err == nil { + key, err = s.memory.ReadState(ipn.StateKey(keyKey)) + if err == nil { + return cert, key, nil + } + } + if s.certShareMode != "ro" { + return nil, nil, ipn.ErrStateNotExist + } + // If we are in cert share read only mode, it is possible that a write + // replica just issued the TLS cert for this DNS name and it has not + // been loaded to store yet, so check the Secret. + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + secret, err := s.client.GetSecret(ctx, domain) + if err != nil { + if kubeclient.IsNotFoundErr(err) { + // TODO(irbekrm): we should return a more specific error + // that wraps ipn.ErrStateNotExist here. 
+ return nil, nil, ipn.ErrStateNotExist + } + return nil, nil, fmt.Errorf("getting TLS Secret %q: %w", domain, err) + } + cert = secret.Data[keyTLSCert] + key = secret.Data[keyTLSKey] + if len(cert) == 0 || len(key) == 0 { + return nil, nil, ipn.ErrStateNotExist + } + // TODO(irbekrm): a read between these two separate writes would + // get a mismatched cert and key. Allow writing both cert and + // key to the memory store in a single lock-protected operation. + s.memory.WriteState(ipn.StateKey(certKey), cert) + s.memory.WriteState(ipn.StateKey(keyKey), key) + return cert, key, nil +} + +func (s *Store) updateSecret(data map[string][]byte, secretName string) (err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer func() { if err != nil { if err := s.client.Event(ctx, eventTypeWarning, reasonTailscaleStateUpdateFailed, err.Error()); err != nil { log.Printf("kubestore: error creating tailscaled state update Event: %v", err) @@ -116,17 +232,17 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { } cancel() }() - secret, err := s.client.GetSecret(ctx, s.secretName) + secret, err := s.client.GetSecret(ctx, secretName) if err != nil { // If the Secret does not exist, create it with the required data. 
- if kubeclient.IsNotFoundErr(err) { + if kubeclient.IsNotFoundErr(err) && s.canCreateSecret(secretName) { return s.client.CreateSecret(ctx, &kubeapi.Secret{ TypeMeta: kubeapi.TypeMeta{ APIVersion: "v1", Kind: "Secret", }, ObjectMeta: kubeapi.ObjectMeta{ - Name: s.secretName, + Name: secretName, }, Data: func(m map[string][]byte) map[string][]byte { d := make(map[string][]byte, len(m)) @@ -137,9 +253,9 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { }(data), }) } - return err + return fmt.Errorf("error getting Secret %s: %w", secretName, err) } - if s.canPatch { + if s.canPatchSecret(secretName) { var m []kubeclient.JSONPatch // If the user has pre-created a Secret with no data, we need to ensure the top level /data field. if len(secret.Data) == 0 { @@ -166,7 +282,7 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { }) } } - if err := s.client.JSONPatchResource(ctx, s.secretName, kubeclient.TypeSecrets, m); err != nil { + if err := s.client.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil { return fmt.Errorf("error patching Secret %s: %w", s.secretName, err) } return nil @@ -176,9 +292,9 @@ func (s *Store) updateStateSecret(data map[string][]byte) (err error) { mak.Set(&secret.Data, sanitizeKey(key), val) } if err := s.client.UpdateSecret(ctx, secret); err != nil { - return err + return fmt.Errorf("error updating Secret %s: %w", s.secretName, err) } - return err + return nil } func (s *Store) loadState() (err error) { @@ -202,6 +318,96 @@ func (s *Store) loadState() (err error) { return nil } +// runCertReload relists and reloads all TLS certs for endpoints shared by this +// node from Secrets other than the state Secret to ensure that renewed certs get eventually loaded. +// It is not critical to reload a cert immediately after +// renewal, so a daily check is acceptable. +// Currently (3/2025) this is only used for the shared HA Ingress certs on 'read' replicas. 
+// Note that if shared certs are not found in memory on an HTTPS request, we +// do a Secret lookup, so this mechanism does not need to ensure that newly +// added Ingresses' certs get loaded. +func (s *Store) runCertReload(ctx context.Context, logf logger.Logf) { + ticker := time.NewTicker(time.Hour * 24) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + sel := s.certSecretSelector() + if err := s.loadCerts(ctx, sel); err != nil { + logf("[unexpected] error reloading TLS certs: %v", err) + } + } + } +} + +// loadCerts lists all Secrets matching the provided selector and loads TLS +// certs and keys from those. +func (s *Store) loadCerts(ctx context.Context, sel map[string]string) error { + ss, err := s.client.ListSecrets(ctx, sel) + if err != nil { + return fmt.Errorf("error listing TLS Secrets: %w", err) + } + for _, secret := range ss.Items { + if !hasTLSData(&secret) { + continue + } + // Only load secrets that have valid domain names (ending in .ts.net) + if !strings.HasSuffix(secret.Name, ".ts.net") { + continue + } + s.memory.WriteState(ipn.StateKey(secret.Name)+".crt", secret.Data[keyTLSCert]) + s.memory.WriteState(ipn.StateKey(secret.Name)+".key", secret.Data[keyTLSKey]) + } + return nil +} + +// canCreateSecret returns true if this node should be allowed to create the given +// Secret in its namespace. +func (s *Store) canCreateSecret(secret string) bool { + // Only allow creating the state Secret (and not TLS Secrets). + return secret == s.secretName +} + +// canPatchSecret returns true if this node should be allowed to patch the given +// Secret. +func (s *Store) canPatchSecret(secret string) bool { + // For backwards compatibility reasons, setups where the proxies are not + // given PATCH permissions for state Secrets are allowed. For TLS + // Secrets, we should always have PATCH permissions. 
+ if secret == s.secretName { + return s.canPatch + } + return true +} + +// certSecretSelector returns a label selector that can be used to list all +// Secrets that aren't Tailscale state Secrets and contain TLS certificates for +// HTTPS endpoints that this node serves. +// Currently (3/2025) this only applies to the Kubernetes Operator's ingress +// ProxyGroup. +func (s *Store) certSecretSelector() map[string]string { + if s.podName == "" { + return map[string]string{} + } + p := strings.LastIndex(s.podName, "-") + if p == -1 { + return map[string]string{} + } + pgName := s.podName[:p] + return map[string]string{ + kubetypes.LabelSecretType: "certs", + kubetypes.LabelManaged: "true", + "tailscale.com/proxy-group": pgName, + } +} + +// hasTLSData returns true if the provided Secret contains non-empty TLS cert and key. +func hasTLSData(s *kubeapi.Secret) bool { + return len(s.Data[keyTLSCert]) != 0 && len(s.Data[keyTLSKey]) != 0 +} + // sanitizeKey converts any value that can be converted to a string into a valid Kubernetes Secret key. // Valid characters are alphanumeric, -, _, and . // https://kubernetes.io/docs/concepts/configuration/secret/#restriction-names-data. 
diff --git a/ipn/store/kubestore/store_kube_test.go b/ipn/store/kubestore/store_kube_test.go index 351458efb..2ed16e77b 100644 --- a/ipn/store/kubestore/store_kube_test.go +++ b/ipn/store/kubestore/store_kube_test.go @@ -4,33 +4,37 @@ package kubestore import ( + "bytes" "context" + "encoding/json" + "fmt" "strings" "testing" "github.com/google/go-cmp/cmp" + "tailscale.com/envknob" "tailscale.com/ipn" "tailscale.com/ipn/store/mem" "tailscale.com/kube/kubeapi" "tailscale.com/kube/kubeclient" ) -func TestUpdateStateSecret(t *testing.T) { +func TestWriteState(t *testing.T) { tests := []struct { name string initial map[string][]byte - updates map[string][]byte + key ipn.StateKey + value []byte wantData map[string][]byte allowPatch bool }{ { - name: "basic_update", + name: "basic_write", initial: map[string][]byte{ "existing": []byte("old"), }, - updates: map[string][]byte{ - "foo": []byte("bar"), - }, + key: "foo", + value: []byte("bar"), wantData: map[string][]byte{ "existing": []byte("old"), "foo": []byte("bar"), @@ -42,35 +46,17 @@ func TestUpdateStateSecret(t *testing.T) { initial: map[string][]byte{ "foo": []byte("old"), }, - updates: map[string][]byte{ - "foo": []byte("new"), - }, + key: "foo", + value: []byte("new"), wantData: map[string][]byte{ "foo": []byte("new"), }, allowPatch: true, }, { - name: "multiple_updates", - initial: map[string][]byte{ - "keep": []byte("keep"), - }, - updates: map[string][]byte{ - "foo": []byte("bar"), - "baz": []byte("qux"), - }, - wantData: map[string][]byte{ - "keep": []byte("keep"), - "foo": []byte("bar"), - "baz": []byte("qux"), - }, - allowPatch: true, - }, - { - name: "create_new_secret", - updates: map[string][]byte{ - "foo": []byte("bar"), - }, + name: "create_new_secret", + key: "foo", + value: []byte("bar"), wantData: map[string][]byte{ "foo": []byte("bar"), }, @@ -81,29 +67,23 @@ func TestUpdateStateSecret(t *testing.T) { initial: map[string][]byte{ "foo": []byte("old"), }, - updates: map[string][]byte{ - "foo": 
[]byte("new"), - }, + key: "foo", + value: []byte("new"), wantData: map[string][]byte{ "foo": []byte("new"), }, allowPatch: false, }, { - name: "sanitize_keys", + name: "sanitize_key", initial: map[string][]byte{ "clean-key": []byte("old"), }, - updates: map[string][]byte{ - "dirty@key": []byte("new"), - "also/bad": []byte("value"), - "good.key": []byte("keep"), - }, + key: "dirty@key", + value: []byte("new"), wantData: map[string][]byte{ "clean-key": []byte("old"), "dirty_key": []byte("new"), - "also_bad": []byte("value"), - "good.key": []byte("keep"), }, allowPatch: true, }, @@ -152,13 +132,13 @@ func TestUpdateStateSecret(t *testing.T) { s := &Store{ client: client, canPatch: tt.allowPatch, - secretName: "test-secret", + secretName: "ts-state", memory: mem.Store{}, } - err := s.updateStateSecret(tt.updates) + err := s.WriteState(tt.key, tt.value) if err != nil { - t.Errorf("updateStateSecret() error = %v", err) + t.Errorf("WriteState() error = %v", err) return } @@ -168,16 +148,576 @@ func TestUpdateStateSecret(t *testing.T) { } // Verify memory store was updated - for k, v := range tt.updates { - got, err := s.memory.ReadState(ipn.StateKey(sanitizeKey(k))) + got, err := s.memory.ReadState(ipn.StateKey(sanitizeKey(string(tt.key)))) + if err != nil { + t.Errorf("reading from memory store: %v", err) + } + if !cmp.Equal(got, tt.value) { + t.Errorf("memory store key %q = %v, want %v", tt.key, got, tt.value) + } + }) + } +} + +func TestWriteTLSCertAndKey(t *testing.T) { + const ( + testDomain = "my-app.tailnetxyz.ts.net" + testCert = "fake-cert" + testKey = "fake-key" + ) + + tests := []struct { + name string + initial map[string][]byte // pre-existing cert and key + certShareMode string + allowPatch bool // whether client can patch the Secret + wantSecretName string // name of the Secret where cert and key should be written + wantSecretData map[string][]byte + wantMemoryStore map[ipn.StateKey][]byte + }{ + { + name: "basic_write", + initial: map[string][]byte{ + 
"existing": []byte("old"), + }, + allowPatch: true, + wantSecretName: "ts-state", + wantSecretData: map[string][]byte{ + "existing": []byte("old"), + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_mode_write", + certShareMode: "rw", + allowPatch: true, + wantSecretName: "my-app.tailnetxyz.ts.net", + wantSecretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_mode_write_update_existing", + initial: map[string][]byte{ + "tls.crt": []byte("old-cert"), + "tls.key": []byte("old-key"), + }, + certShareMode: "rw", + allowPatch: true, + wantSecretName: "my-app.tailnetxyz.ts.net", + wantSecretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "update_existing", + initial: map[string][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte("old-cert"), + "my-app.tailnetxyz.ts.net.key": []byte("old-key"), + }, + certShareMode: "", + allowPatch: true, + wantSecretName: "ts-state", + wantSecretData: map[string][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "patch_denied", + certShareMode: "", + allowPatch: false, + wantSecretName: "ts-state", + wantSecretData: map[string][]byte{ + 
"my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + // Set POD_NAME for testing selectors + envknob.Setenv("POD_NAME", "ingress-proxies-1") + defer envknob.Setenv("POD_NAME", "") + + secret := tt.initial // track current state + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if secret == nil { + return nil, &kubeapi.Status{Code: 404} + } + return &kubeapi.Secret{Data: secret}, nil + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return tt.allowPatch, true, nil + }, + CreateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + if s.Name != tt.wantSecretName { + t.Errorf("CreateSecret called with wrong name, got %q, want %q", s.Name, tt.wantSecretName) + } + secret = s.Data + return nil + }, + UpdateSecretImpl: func(ctx context.Context, s *kubeapi.Secret) error { + if s.Name != tt.wantSecretName { + t.Errorf("UpdateSecret called with wrong name, got %q, want %q", s.Name, tt.wantSecretName) + } + secret = s.Data + return nil + }, + JSONPatchResourceImpl: func(ctx context.Context, name, resourceType string, patches []kubeclient.JSONPatch) error { + if !tt.allowPatch { + return &kubeapi.Status{Reason: "Forbidden"} + } + if name != tt.wantSecretName { + t.Errorf("JSONPatchResource called with wrong name, got %q, want %q", name, tt.wantSecretName) + } + if secret == nil { + secret = make(map[string][]byte) + } + for _, p := range patches { + if p.Op == "add" && p.Path == "/data" { + secret = p.Value.(map[string][]byte) + } else if p.Op == "add" && strings.HasPrefix(p.Path, "/data/") { + key := strings.TrimPrefix(p.Path, "/data/") + secret[key] = 
p.Value.([]byte) + } + } + return nil + }, + } + + s := &Store{ + client: client, + canPatch: tt.allowPatch, + secretName: tt.wantSecretName, + certShareMode: tt.certShareMode, + memory: mem.Store{}, + } + + err := s.WriteTLSCertAndKey(testDomain, []byte(testCert), []byte(testKey)) + if err != nil { + t.Errorf("WriteTLSCertAndKey() error = '%v'", err) + return + } + + // Verify secret data + if diff := cmp.Diff(secret, tt.wantSecretData); diff != "" { + t.Errorf("secret data mismatch (-got +want):\n%s", diff) + } + + // Verify memory store was updated + for key, want := range tt.wantMemoryStore { + got, err := s.memory.ReadState(key) if err != nil { t.Errorf("reading from memory store: %v", err) continue } - if !cmp.Equal(got, v) { - t.Errorf("memory store key %q = %v, want %v", k, got, v) + if !cmp.Equal(got, want) { + t.Errorf("memory store key %q = %v, want %v", key, got, want) } } }) } } + +func TestReadTLSCertAndKey(t *testing.T) { + const ( + testDomain = "my-app.tailnetxyz.ts.net" + testCert = "fake-cert" + testKey = "fake-key" + ) + + tests := []struct { + name string + memoryStore map[ipn.StateKey][]byte // pre-existing memory store state + certShareMode string + domain string + secretData map[string][]byte // data to return from mock GetSecret + secretGetErr error // error to return from mock GetSecret + wantCert []byte + wantKey []byte + wantErr error + // what should end up in memory store after the store is created + wantMemoryStore map[ipn.StateKey][]byte + }{ + { + name: "found", + memoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + domain: testDomain, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "not_found", + domain: testDomain, + wantErr: ipn.ErrStateNotExist, + }, + { + 
name: "cert_share_ro_mode_found_in_secret", + certShareMode: "ro", + domain: testDomain, + secretData: map[string][]byte{ + "tls.crt": []byte(testCert), + "tls.key": []byte(testKey), + }, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_ro_mode_found_in_memory", + certShareMode: "ro", + memoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + domain: testDomain, + wantCert: []byte(testCert), + wantKey: []byte(testKey), + wantMemoryStore: map[ipn.StateKey][]byte{ + "my-app.tailnetxyz.ts.net.crt": []byte(testCert), + "my-app.tailnetxyz.ts.net.key": []byte(testKey), + }, + }, + { + name: "cert_share_ro_mode_not_found", + certShareMode: "ro", + domain: testDomain, + secretGetErr: &kubeapi.Status{Code: 404}, + wantErr: ipn.ErrStateNotExist, + }, + { + name: "cert_share_ro_mode_empty_cert_in_secret", + certShareMode: "ro", + domain: testDomain, + secretData: map[string][]byte{ + "tls.crt": {}, + "tls.key": []byte(testKey), + }, + wantErr: ipn.ErrStateNotExist, + }, + { + name: "cert_share_ro_mode_kube_api_error", + certShareMode: "ro", + domain: testDomain, + secretGetErr: fmt.Errorf("api error"), + wantErr: fmt.Errorf("getting TLS Secret %q: api error", sanitizeKey(testDomain)), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if tt.secretGetErr != nil { + return nil, tt.secretGetErr + } + return &kubeapi.Secret{Data: tt.secretData}, nil + }, + } + + s := &Store{ + client: client, + secretName: "ts-state", + certShareMode: tt.certShareMode, + memory: mem.Store{}, + } + + // Initialize memory store + for k, v := range tt.memoryStore { + 
s.memory.WriteState(k, v) + } + + gotCert, gotKey, err := s.ReadTLSCertAndKey(tt.domain) + if tt.wantErr != nil { + if err == nil { + t.Errorf("ReadTLSCertAndKey() error = nil, want error containing %v", tt.wantErr) + return + } + if !strings.Contains(err.Error(), tt.wantErr.Error()) { + t.Errorf("ReadTLSCertAndKey() error = %v, want error containing %v", err, tt.wantErr) + } + return + } + if err != nil { + t.Errorf("ReadTLSCertAndKey() unexpected error: %v", err) + return + } + + if !bytes.Equal(gotCert, tt.wantCert) { + t.Errorf("ReadTLSCertAndKey() gotCert = %v, want %v", gotCert, tt.wantCert) + } + if !bytes.Equal(gotKey, tt.wantKey) { + t.Errorf("ReadTLSCertAndKey() gotKey = %v, want %v", gotKey, tt.wantKey) + } + + // Verify memory store contents after operation + if tt.wantMemoryStore != nil { + for key, want := range tt.wantMemoryStore { + got, err := s.memory.ReadState(key) + if err != nil { + t.Errorf("reading from memory store: %v", err) + continue + } + if !bytes.Equal(got, want) { + t.Errorf("memory store key %q = %v, want %v", key, got, want) + } + } + } + }) + } +} + +func TestNewWithClient(t *testing.T) { + const ( + secretName = "ts-state" + testCert = "fake-cert" + testKey = "fake-key" + ) + + certSecretsLabels := map[string]string{ + "tailscale.com/secret-type": "certs", + "tailscale.com/managed": "true", + "tailscale.com/proxy-group": "ingress-proxies", + } + + // Helper function to create Secret objects for testing + makeSecret := func(name string, labels map[string]string, certSuffix string) kubeapi.Secret { + return kubeapi.Secret{ + ObjectMeta: kubeapi.ObjectMeta{ + Name: name, + Labels: labels, + }, + Data: map[string][]byte{ + "tls.crt": []byte(testCert + certSuffix), + "tls.key": []byte(testKey + certSuffix), + }, + } + } + + tests := []struct { + name string + stateSecretContents map[string][]byte // data in state Secret + TLSSecrets []kubeapi.Secret // list of TLS cert Secrets + certMode string + secretGetErr error // error to return 
from GetSecret + secretsListErr error // error to return from ListSecrets + wantMemoryStoreContents map[ipn.StateKey][]byte + wantErr error + }{ + { + name: "empty_state_secret", + stateSecretContents: map[string][]byte{}, + wantMemoryStoreContents: map[ipn.StateKey][]byte{}, + }, + { + name: "state_secret_not_found", + secretGetErr: &kubeapi.Status{Code: 404}, + wantMemoryStoreContents: map[ipn.StateKey][]byte{}, + }, + { + name: "state_secret_get_error", + secretGetErr: fmt.Errorf("some error"), + wantErr: fmt.Errorf("error loading state from kube Secret: some error"), + }, + { + name: "load_existing_state", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "baz": []byte("qux"), + }, + }, + { + name: "load_select_certs_in_read_only_mode", + certMode: "ro", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + TLSSecrets: []kubeapi.Secret{ + makeSecret("app1.tailnetxyz.ts.net", certSecretsLabels, "1"), + makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), + makeSecret("some-other-secret", nil, "3"), + makeSecret("app3.other-proxies.ts.net", map[string]string{ + "tailscale.com/secret-type": "certs", + "tailscale.com/managed": "true", + "tailscale.com/proxy-group": "some-other-proxygroup", + }, "4"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "app1.tailnetxyz.ts.net.crt": []byte(testCert + "1"), + "app1.tailnetxyz.ts.net.key": []byte(testKey + "1"), + "app2.tailnetxyz.ts.net.crt": []byte(testCert + "2"), + "app2.tailnetxyz.ts.net.key": []byte(testKey + "2"), + }, + }, + { + name: "load_select_certs_in_read_write_mode", + certMode: "rw", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + TLSSecrets: []kubeapi.Secret{ + makeSecret("app1.tailnetxyz.ts.net", certSecretsLabels, "1"), + makeSecret("app2.tailnetxyz.ts.net", certSecretsLabels, "2"), + 
makeSecret("some-other-secret", nil, "3"), + makeSecret("app3.other-proxies.ts.net", map[string]string{ + "tailscale.com/secret-type": "certs", + "tailscale.com/managed": "true", + "tailscale.com/proxy-group": "some-other-proxygroup", + }, "4"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + "app1.tailnetxyz.ts.net.crt": []byte(testCert + "1"), + "app1.tailnetxyz.ts.net.key": []byte(testKey + "1"), + "app2.tailnetxyz.ts.net.crt": []byte(testCert + "2"), + "app2.tailnetxyz.ts.net.key": []byte(testKey + "2"), + }, + }, + { + name: "list_cert_secrets_fails", + certMode: "ro", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + secretsListErr: fmt.Errorf("list error"), + // The error is logged but not returned, and state is still loaded + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + }, + }, + { + name: "cert_secrets_not_loaded_when_not_in_share_mode", + certMode: "", + stateSecretContents: map[string][]byte{ + "foo": []byte("bar"), + }, + TLSSecrets: []kubeapi.Secret{ + makeSecret("app1.tailnetxyz.ts.net", certSecretsLabels, "1"), + }, + wantMemoryStoreContents: map[ipn.StateKey][]byte{ + "foo": []byte("bar"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + envknob.Setenv("TS_CERT_SHARE_MODE", tt.certMode) + + t.Setenv("POD_NAME", "ingress-proxies-1") + + client := &kubeclient.FakeClient{ + GetSecretImpl: func(ctx context.Context, name string) (*kubeapi.Secret, error) { + if tt.secretGetErr != nil { + return nil, tt.secretGetErr + } + if name == secretName { + return &kubeapi.Secret{Data: tt.stateSecretContents}, nil + } + return nil, &kubeapi.Status{Code: 404} + }, + CheckSecretPermissionsImpl: func(ctx context.Context, name string) (bool, bool, error) { + return true, true, nil + }, + ListSecretsImpl: func(ctx context.Context, selector map[string]string) (*kubeapi.SecretList, error) { + if tt.secretsListErr != nil { + return nil, 
tt.secretsListErr + } + var matchingSecrets []kubeapi.Secret + for _, secret := range tt.TLSSecrets { + matches := true + for k, v := range selector { + if secret.Labels[k] != v { + matches = false + break + } + } + if matches { + matchingSecrets = append(matchingSecrets, secret) + } + } + return &kubeapi.SecretList{Items: matchingSecrets}, nil + }, + } + + s, err := newWithClient(t.Logf, client, secretName) + if tt.wantErr != nil { + if err == nil { + t.Errorf("NewWithClient() error = nil, want error containing %v", tt.wantErr) + return + } + if !strings.Contains(err.Error(), tt.wantErr.Error()) { + t.Errorf("NewWithClient() error = %v, want error containing %v", err, tt.wantErr) + } + return + } + + if err != nil { + t.Errorf("NewWithClient() unexpected error: %v", err) + return + } + + // Verify memory store contents + gotJSON, err := s.memory.ExportToJSON() + if err != nil { + t.Errorf("ExportToJSON failed: %v", err) + return + } + var got map[ipn.StateKey][]byte + if err := json.Unmarshal(gotJSON, &got); err != nil { + t.Errorf("failed to unmarshal memory store JSON: %v", err) + return + } + want := tt.wantMemoryStoreContents + if want == nil { + want = map[ipn.StateKey][]byte{} + } + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("memory store contents mismatch (-got +want):\n%s", diff) + } + }) + } +} diff --git a/kube/kubeapi/api.go b/kube/kubeapi/api.go index a2ae8cc79..e62bd6e2b 100644 --- a/kube/kubeapi/api.go +++ b/kube/kubeapi/api.go @@ -153,6 +153,14 @@ type Secret struct { Data map[string][]byte `json:"data,omitempty"` } +// SecretList is a list of Secret objects. +type SecretList struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata"` + + Items []Secret `json:"items,omitempty"` +} + // Event contains a subset of fields from corev1.Event. // https://github.com/kubernetes/api/blob/6cc44b8953ae704d6d9ec2adf32e7ae19199ea9f/core/v1/types.go#L7034 // It is copied here to avoid having to import kube libraries. 
diff --git a/kube/kubeclient/client.go b/kube/kubeclient/client.go index d4309448d..332b21106 100644 --- a/kube/kubeclient/client.go +++ b/kube/kubeclient/client.go @@ -60,6 +60,7 @@ func readFile(n string) ([]byte, error) { // It expects to be run inside a cluster. type Client interface { GetSecret(context.Context, string) (*kubeapi.Secret, error) + ListSecrets(context.Context, map[string]string) (*kubeapi.SecretList, error) UpdateSecret(context.Context, *kubeapi.Secret) error CreateSecret(context.Context, *kubeapi.Secret) error // Event attempts to ensure an event with the specified options associated with the Pod in which we are @@ -248,21 +249,35 @@ func (c *client) newRequest(ctx context.Context, method, url string, in any) (*h // GetSecret fetches the secret from the Kubernetes API. func (c *client) GetSecret(ctx context.Context, name string) (*kubeapi.Secret, error) { s := &kubeapi.Secret{Data: make(map[string][]byte)} - if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, TypeSecrets), nil, s); err != nil { + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, TypeSecrets, ""), nil, s); err != nil { return nil, err } return s, nil } +// ListSecrets fetches the secret from the Kubernetes API. +func (c *client) ListSecrets(ctx context.Context, selector map[string]string) (*kubeapi.SecretList, error) { + sl := new(kubeapi.SecretList) + s := make([]string, 0, len(selector)) + for key, val := range selector { + s = append(s, key+"="+url.QueryEscape(val)) + } + ss := strings.Join(s, ",") + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL("", TypeSecrets, ss), nil, sl); err != nil { + return nil, err + } + return sl, nil +} + // CreateSecret creates a secret in the Kubernetes API. 
func (c *client) CreateSecret(ctx context.Context, s *kubeapi.Secret) error { s.Namespace = c.ns - return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", TypeSecrets), s, nil) + return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", TypeSecrets, ""), s, nil) } // UpdateSecret updates a secret in the Kubernetes API. func (c *client) UpdateSecret(ctx context.Context, s *kubeapi.Secret) error { - return c.kubeAPIRequest(ctx, "PUT", c.resourceURL(s.Name, TypeSecrets), s, nil) + return c.kubeAPIRequest(ctx, "PUT", c.resourceURL(s.Name, TypeSecrets, ""), s, nil) } // JSONPatch is a JSON patch operation. @@ -283,14 +298,14 @@ func (c *client) JSONPatchResource(ctx context.Context, name, typ string, patche return fmt.Errorf("unsupported JSON patch operation: %q", p.Op) } } - return c.kubeAPIRequest(ctx, "PATCH", c.resourceURL(name, typ), patches, nil, setHeader("Content-Type", "application/json-patch+json")) + return c.kubeAPIRequest(ctx, "PATCH", c.resourceURL(name, typ, ""), patches, nil, setHeader("Content-Type", "application/json-patch+json")) } // StrategicMergePatchSecret updates a secret in the Kubernetes API using a // strategic merge patch. // If a fieldManager is provided, it will be used to track the patch. func (c *client) StrategicMergePatchSecret(ctx context.Context, name string, s *kubeapi.Secret, fieldManager string) error { - surl := c.resourceURL(name, TypeSecrets) + surl := c.resourceURL(name, TypeSecrets, "") if fieldManager != "" { uv := url.Values{ "fieldManager": {fieldManager}, @@ -342,7 +357,7 @@ func (c *client) Event(ctx context.Context, typ, reason, msg string) error { LastTimestamp: now, Count: 1, } - return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", typeEvents), &ev, nil) + return c.kubeAPIRequest(ctx, "POST", c.resourceURL("", typeEvents, ""), &ev, nil) } // If the Event already exists, we patch its count and last timestamp. 
This ensures that when users run 'kubectl // describe pod...', they see the event just once (but with a message of how many times it has appeared over @@ -472,9 +487,13 @@ func (c *client) checkPermission(ctx context.Context, verb, typ, name string) (b // resourceURL returns a URL that can be used to interact with the given resource type and, if name is not empty string, // the named resource of that type. // Note that this only works for core/v1 resource types. -func (c *client) resourceURL(name, typ string) string { +func (c *client) resourceURL(name, typ, sel string) string { if name == "" { - return fmt.Sprintf("%s/api/v1/namespaces/%s/%s", c.url, c.ns, typ) + url := fmt.Sprintf("%s/api/v1/namespaces/%s/%s", c.url, c.ns, typ) + if sel != "" { + url += "?labelSelector=" + sel + } + return url } return fmt.Sprintf("%s/api/v1/namespaces/%s/%s/%s", c.url, c.ns, typ, name) } @@ -487,7 +506,7 @@ func (c *client) nameForEvent(reason string) string { // getEvent fetches the event from the Kubernetes API. 
func (c *client) getEvent(ctx context.Context, name string) (*kubeapi.Event, error) { e := &kubeapi.Event{} - if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, typeEvents), nil, e); err != nil { + if err := c.kubeAPIRequest(ctx, "GET", c.resourceURL(name, typeEvents, ""), nil, e); err != nil { return nil, err } return e, nil } diff --git a/kube/kubeclient/fake_client.go b/kube/kubeclient/fake_client.go index aea786ea0..c21dc2bf8 100644 --- a/kube/kubeclient/fake_client.go +++ b/kube/kubeclient/fake_client.go @@ -18,6 +18,7 @@ type FakeClient struct { CreateSecretImpl func(context.Context, *kubeapi.Secret) error UpdateSecretImpl func(context.Context, *kubeapi.Secret) error JSONPatchResourceImpl func(context.Context, string, string, []JSONPatch) error + ListSecretsImpl func(context.Context, map[string]string) (*kubeapi.SecretList, error) } func (fc *FakeClient) CheckSecretPermissions(ctx context.Context, name string) (bool, bool, error) { @@ -45,3 +46,9 @@ func (fc *FakeClient) UpdateSecret(ctx context.Context, secret *kubeapi.Secret) func (fc *FakeClient) CreateSecret(ctx context.Context, secret *kubeapi.Secret) error { return fc.CreateSecretImpl(ctx, secret) } +func (fc *FakeClient) ListSecrets(ctx context.Context, selector map[string]string) (*kubeapi.SecretList, error) { + if fc.ListSecretsImpl != nil { + return fc.ListSecretsImpl(ctx, selector) + } + return nil, nil +} diff --git a/kube/kubetypes/types.go b/kube/kubetypes/types.go index 894cbb41d..e54e1c99f 100644 --- a/kube/kubetypes/types.go +++ b/kube/kubetypes/types.go @@ -48,4 +48,7 @@ const ( PodIPv4Header string = "Pod-IPv4" EgessServicesPreshutdownEP = "/internal-egress-services-preshutdown" + + LabelManaged = "tailscale.com/managed" + LabelSecretType = "tailscale.com/secret-type" // "config", "state", "certs" ) From 74ee7493866c10da0ba0ff58a9020313e006a712 Mon Sep 17 00:00:00 2001 From: Anton Tolchanov Date: Mon, 17 Mar 2025 18:06:58 +0000 Subject: [PATCH 80/87] client/tailscale: add tailnet lock 
fields to Device struct These are documented, but have not yet been defined in the client. https://tailscale.com/api#tag/devices/GET/device/{deviceId} Updates tailscale/corp#27050 Signed-off-by: Anton Tolchanov --- client/tailscale/devices.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/client/tailscale/devices.go b/client/tailscale/devices.go index b79191d53..0664f9e63 100644 --- a/client/tailscale/devices.go +++ b/client/tailscale/devices.go @@ -79,6 +79,13 @@ type Device struct { // Tailscale have attempted to collect this from the device but it has not // opted in, PostureIdentity will have Disabled=true. PostureIdentity *DevicePostureIdentity `json:"postureIdentity"` + + // TailnetLockKey is the tailnet lock public key of the node as a hex string. + TailnetLockKey string `json:"tailnetLockKey,omitempty"` + + // TailnetLockErr indicates an issue with the tailnet lock node-key signature + // on this device. This field is only populated when tailnet lock is enabled. + TailnetLockErr string `json:"tailnetLockError,omitempty"` } type DevicePostureIdentity struct { From daa5635ba6226bef75d37867fb3449332a1a9758 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Thu, 13 Mar 2025 15:29:58 -0700 Subject: [PATCH 81/87] tsweb: split promvarz into an optional dependency Allows the use of tsweb without pulling in all of the heavy prometheus client libraries, protobuf and so on. 
Updates #15160 Signed-off-by: David Anderson --- cmd/derper/depaware.txt | 13 +++++++------ cmd/derper/derper.go | 3 +++ cmd/derpprobe/derpprobe.go | 3 +++ cmd/k8s-operator/depaware.txt | 6 +++--- cmd/stund/depaware.txt | 13 +++++++------ cmd/stund/stund.go | 3 +++ cmd/tailscale/depaware.txt | 6 +++--- cmd/tailscaled/depaware.txt | 6 +++--- cmd/xdpderper/xdpderper.go | 3 +++ tsweb/debug.go | 13 +++++++++++-- tsweb/promvarz/promvarz.go | 13 +++++++++++-- tsweb/promvarz/promvarz_test.go | 2 +- 12 files changed, 58 insertions(+), 26 deletions(-) diff --git a/cmd/derper/depaware.txt b/cmd/derper/depaware.txt index 1812a1a8d..5d375a515 100644 --- a/cmd/derper/depaware.txt +++ b/cmd/derper/depaware.txt @@ -96,6 +96,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/disco from tailscale.com/derp tailscale.com/drive from tailscale.com/client/local+ tailscale.com/envknob from tailscale.com/client/local+ + tailscale.com/feature from tailscale.com/tsweb tailscale.com/health from tailscale.com/net/tlsdial+ tailscale.com/hostinfo from tailscale.com/net/netmon+ tailscale.com/ipn from tailscale.com/client/local @@ -128,8 +129,8 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa tailscale.com/tstime from tailscale.com/derp+ tailscale.com/tstime/mono from tailscale.com/tstime/rate tailscale.com/tstime/rate from tailscale.com/derp - tailscale.com/tsweb from tailscale.com/cmd/derper - tailscale.com/tsweb/promvarz from tailscale.com/tsweb + tailscale.com/tsweb from tailscale.com/cmd/derper+ + tailscale.com/tsweb/promvarz from tailscale.com/cmd/derper tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg+ tailscale.com/types/empty from tailscale.com/ipn @@ -309,7 +310,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa html from net/http/pprof+ html/template from tailscale.com/cmd/derper internal/abi from 
crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -319,12 +320,12 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa internal/filepathlite from os+ internal/fmtsort from fmt+ internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from crypto/tls+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/derper/derper.go b/cmd/derper/derper.go index 221ee0bff..3c6fda68c 100644 --- a/cmd/derper/derper.go +++ b/cmd/derper/derper.go @@ -49,6 +49,9 @@ import ( "tailscale.com/types/key" "tailscale.com/types/logger" "tailscale.com/version" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/cmd/derpprobe/derpprobe.go b/cmd/derpprobe/derpprobe.go index 6e8c603b9..899838462 100644 --- a/cmd/derpprobe/derpprobe.go +++ b/cmd/derpprobe/derpprobe.go @@ -15,6 +15,9 @@ import ( "tailscale.com/prober" "tailscale.com/tsweb" "tailscale.com/version" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 1c27fddea..978744947 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -1151,7 +1151,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ html from html/template+ html/template from github.com/gorilla/csrf internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan 
from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -1163,11 +1163,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ internal/lazyregexp from go/doc - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/stund/depaware.txt b/cmd/stund/depaware.txt index 1d0a093c4..2326e3a24 100644 --- a/cmd/stund/depaware.txt +++ b/cmd/stund/depaware.txt @@ -49,6 +49,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+ tailscale.com from tailscale.com/version tailscale.com/envknob from tailscale.com/tsweb+ + tailscale.com/feature from tailscale.com/tsweb tailscale.com/kube/kubetypes from tailscale.com/envknob tailscale.com/metrics from tailscale.com/net/stunserver+ tailscale.com/net/netaddr from tailscale.com/net/tsaddr @@ -57,8 +58,8 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar tailscale.com/net/tsaddr from tailscale.com/tsweb tailscale.com/syncs from tailscale.com/metrics tailscale.com/tailcfg from tailscale.com/version - tailscale.com/tsweb from tailscale.com/cmd/stund - tailscale.com/tsweb/promvarz from tailscale.com/tsweb + tailscale.com/tsweb from tailscale.com/cmd/stund+ + tailscale.com/tsweb/promvarz from tailscale.com/cmd/stund tailscale.com/tsweb/varz from tailscale.com/tsweb+ tailscale.com/types/dnstype from tailscale.com/tailcfg tailscale.com/types/ipproto from tailscale.com/tailcfg @@ 
-194,7 +195,7 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar hash/maphash from go4.org/mem html from net/http/pprof+ internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -204,12 +205,12 @@ tailscale.com/cmd/stund dependencies: (generated by github.com/tailscale/depawar internal/filepathlite from os+ internal/fmtsort from fmt internal/goarch from crypto/internal/fips140deps/cpu+ - internal/godebug from crypto/tls+ + internal/godebug from crypto/internal/fips140deps/godebug+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/stund/stund.go b/cmd/stund/stund.go index c38429169..1055d966f 100644 --- a/cmd/stund/stund.go +++ b/cmd/stund/stund.go @@ -15,6 +15,9 @@ import ( "tailscale.com/net/stunserver" "tailscale.com/tsweb" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/cmd/tailscale/depaware.txt b/cmd/tailscale/depaware.txt index afe62165c..431bf7b71 100644 --- a/cmd/tailscale/depaware.txt +++ b/cmd/tailscale/depaware.txt @@ -333,7 +333,7 @@ tailscale.com/cmd/tailscale dependencies: (generated by github.com/tailscale/dep image/color from github.com/skip2/go-qrcode+ image/png from github.com/skip2/go-qrcode internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -345,10 +345,10 @@ tailscale.com/cmd/tailscale dependencies: (generated by 
github.com/tailscale/dep internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/tailscaled/depaware.txt b/cmd/tailscaled/depaware.txt index b47f43c76..0a9c46831 100644 --- a/cmd/tailscaled/depaware.txt +++ b/cmd/tailscaled/depaware.txt @@ -589,7 +589,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de html from html/template+ html/template from github.com/gorilla/csrf internal/abi from crypto/x509/internal/macos+ - internal/asan from syscall+ + internal/asan from internal/runtime/maps+ internal/bisect from internal/godebug internal/bytealg from bytes+ internal/byteorder from crypto/cipher+ @@ -601,10 +601,10 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de internal/goarch from crypto/internal/fips140deps/cpu+ internal/godebug from archive/tar+ internal/godebugs from internal/godebug+ - internal/goexperiment from runtime+ + internal/goexperiment from hash/maphash+ internal/goos from crypto/x509+ internal/itoa from internal/poll+ - internal/msan from syscall+ + internal/msan from internal/runtime/maps+ internal/nettrace from net+ internal/oserror from io/fs+ internal/poll from net+ diff --git a/cmd/xdpderper/xdpderper.go b/cmd/xdpderper/xdpderper.go index 599034ae7..c127baf54 100644 --- a/cmd/xdpderper/xdpderper.go +++ b/cmd/xdpderper/xdpderper.go @@ -18,6 +18,9 @@ import ( "tailscale.com/derp/xdp" "tailscale.com/net/netutil" "tailscale.com/tsweb" + + // Support for prometheus varz in tsweb + _ "tailscale.com/tsweb/promvarz" ) var ( diff --git a/tsweb/debug.go b/tsweb/debug.go index 843324482..ac1981999 100644 
--- a/tsweb/debug.go +++ b/tsweb/debug.go @@ -14,7 +14,7 @@ import ( "os" "runtime" - "tailscale.com/tsweb/promvarz" + "tailscale.com/feature" "tailscale.com/tsweb/varz" "tailscale.com/version" ) @@ -37,6 +37,11 @@ type DebugHandler struct { title string // title displayed on index page } +// PrometheusHandler is an optional hook to enable native Prometheus +// support in the debug handler. It is disabled by default. Import the +// tailscale.com/tsweb/promvarz package to enable this feature. +var PrometheusHandler feature.Hook[func(*DebugHandler)] + // Debugger returns the DebugHandler registered on mux at /debug/, // creating it if necessary. func Debugger(mux *http.ServeMux) *DebugHandler { @@ -53,7 +58,11 @@ func Debugger(mux *http.ServeMux) *DebugHandler { ret.KVFunc("Uptime", func() any { return varz.Uptime() }) ret.KV("Version", version.Long()) ret.Handle("vars", "Metrics (Go)", expvar.Handler()) - ret.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(promvarz.Handler)) + if PrometheusHandler.IsSet() { + PrometheusHandler.Get()(ret) + } else { + ret.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(varz.Handler)) + } // pprof.Index serves everything that runtime/pprof.Lookup finds: // goroutine, threadcreate, heap, allocs, block, mutex diff --git a/tsweb/promvarz/promvarz.go b/tsweb/promvarz/promvarz.go index d0e1e52ba..1d978c767 100644 --- a/tsweb/promvarz/promvarz.go +++ b/tsweb/promvarz/promvarz.go @@ -11,12 +11,21 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/expfmt" + "tailscale.com/tsweb" "tailscale.com/tsweb/varz" ) -// Handler returns Prometheus metrics exported by our expvar converter +func init() { + tsweb.PrometheusHandler.Set(registerVarz) +} + +func registerVarz(debug *tsweb.DebugHandler) { + debug.Handle("varz", "Metrics (Prometheus)", http.HandlerFunc(handler)) +} + +// handler returns Prometheus metrics exported by our expvar converter // and the official Prometheus client. 
-func Handler(w http.ResponseWriter, r *http.Request) { +func handler(w http.ResponseWriter, r *http.Request) { if err := gatherNativePrometheusMetrics(w); err != nil { w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) diff --git a/tsweb/promvarz/promvarz_test.go b/tsweb/promvarz/promvarz_test.go index a3f4e66f1..9f91b5d12 100644 --- a/tsweb/promvarz/promvarz_test.go +++ b/tsweb/promvarz/promvarz_test.go @@ -23,7 +23,7 @@ func TestHandler(t *testing.T) { testVar1.Set(42) testVar2.Set(4242) - svr := httptest.NewServer(http.HandlerFunc(Handler)) + svr := httptest.NewServer(http.HandlerFunc(handler)) defer svr.Close() want := ` From e091e71937bd6cd2b1f9e2685991600211f28446 Mon Sep 17 00:00:00 2001 From: David Anderson Date: Fri, 7 Mar 2025 09:49:09 -0800 Subject: [PATCH 82/87] util/eventbus: remove debug UI from iOS build The use of html/template causes reflect-based linker bloat. Longer term we have options to bring the UI back to iOS, but for now, cut it out. Updates #15297 Signed-off-by: David Anderson --- util/eventbus/debughttp.go | 2 ++ util/eventbus/debughttp_ios.go | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 util/eventbus/debughttp_ios.go diff --git a/util/eventbus/debughttp.go b/util/eventbus/debughttp.go index bbd929efb..18888cc56 100644 --- a/util/eventbus/debughttp.go +++ b/util/eventbus/debughttp.go @@ -1,6 +1,8 @@ // Copyright (c) Tailscale Inc & AUTHORS // SPDX-License-Identifier: BSD-3-Clause +//go:build !ios + package eventbus import ( diff --git a/util/eventbus/debughttp_ios.go b/util/eventbus/debughttp_ios.go new file mode 100644 index 000000000..a898898b7 --- /dev/null +++ b/util/eventbus/debughttp_ios.go @@ -0,0 +1,18 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build ios + +package eventbus + +import "tailscale.com/tsweb" + +func registerHTTPDebugger(d *Debugger, td *tsweb.DebugHandler) { + // The event bus debugging UI uses html/template, 
which uses + // reflection for method lookups. This forces the compiler to + // retain a lot more code and information to make dynamic method + // dispatch work, which is unacceptable bloat for the iOS build. + // + // TODO: https://github.com/tailscale/tailscale/issues/15297 to + // bring the debug UI back to iOS somehow. +} From b0095a5da4a0f10e85d9c6a0c5c8005a3d7ea3a1 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 19 Mar 2025 01:53:15 -0700 Subject: [PATCH 83/87] cmd/k8s-operator: wait for VIPService before updating HA Ingress status (#15343) Update the HA Ingress controller to wait until it sees AdvertisedServices config propagated into at least 1 Pod's prefs before it updates the status on the Ingress, to ensure the ProxyGroup Pods are ready to serve traffic before indicating that the Ingress is ready Updates tailscale/corp#24795 Change-Id: I1b8ce23c9e312d08f9d02e48d70bdebd9e1a4757 Signed-off-by: Tom Proctor --- cmd/k8s-operator/ingress-for-pg.go | 91 ++++++++++++++++++------- cmd/k8s-operator/ingress-for-pg_test.go | 25 +++++++ cmd/k8s-operator/operator.go | 42 +++++++++++- cmd/k8s-operator/proxygroup.go | 6 +- cmd/k8s-operator/proxygroup_specs.go | 4 +- cmd/k8s-operator/tsrecorder.go | 41 ++++++----- 6 files changed, 158 insertions(+), 51 deletions(-) diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index cdbfecb35..fe85509ad 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -154,13 +154,13 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin pg := &tsapi.ProxyGroup{} if err := r.Get(ctx, client.ObjectKey{Name: pgName}, pg); err != nil { if apierrors.IsNotFound(err) { - logger.Infof("ProxyGroup %q does not exist", pgName) + logger.Infof("ProxyGroup does not exist") return false, nil } return false, fmt.Errorf("getting ProxyGroup %q: %w", pgName, err) } if !tsoperator.ProxyGroupIsReady(pg) { - logger.Infof("ProxyGroup %q is not (yet) ready", 
pgName) + logger.Infof("ProxyGroup is not (yet) ready") return false, nil } @@ -175,8 +175,6 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin r.recorder.Event(ing, corev1.EventTypeWarning, "HTTPSNotEnabled", "HTTPS is not enabled on the tailnet; ingress may not work") } - logger = logger.With("proxy-group", pg.Name) - if !slices.Contains(ing.Finalizers, FinalizerNamePG) { // This log line is printed exactly once during initial provisioning, // because once the finalizer is in place this block gets skipped. So, @@ -326,7 +324,7 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin !reflect.DeepEqual(vipSvc.Tags, existingVIPSvc.Tags) || !reflect.DeepEqual(vipSvc.Ports, existingVIPSvc.Ports) || !strings.EqualFold(vipSvc.Comment, existingVIPSvc.Comment) { - logger.Infof("Ensuring VIPService %q exists and is up to date", hostname) + logger.Infof("Ensuring VIPService exists and is up to date") if err := r.tsClient.CreateOrUpdateVIPService(ctx, vipSvc); err != nil { return false, fmt.Errorf("error creating VIPService: %w", err) } @@ -338,31 +336,48 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin return false, fmt.Errorf("failed to update tailscaled config: %w", err) } - // TODO(irbekrm): check that the replicas are ready to route traffic for the VIPService before updating Ingress - // status. - // 6. Update Ingress status + // 6. Update Ingress status if ProxyGroup Pods are ready. 
+ count, err := r.numberPodsAdvertising(ctx, pg.Name, serviceName) + if err != nil { + return false, fmt.Errorf("failed to check if any Pods are configured: %w", err) + } + oldStatus := ing.Status.DeepCopy() - ports := []networkingv1.IngressPortStatus{ - { - Protocol: "TCP", - Port: 443, - }, + + switch count { + case 0: + ing.Status.LoadBalancer.Ingress = nil + default: + ports := []networkingv1.IngressPortStatus{ + { + Protocol: "TCP", + Port: 443, + }, + } + if isHTTPEndpointEnabled(ing) { + ports = append(ports, networkingv1.IngressPortStatus{ + Protocol: "TCP", + Port: 80, + }) + } + ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ + { + Hostname: dnsName, + Ports: ports, + }, + } } - if isHTTPEndpointEnabled(ing) { - ports = append(ports, networkingv1.IngressPortStatus{ - Protocol: "TCP", - Port: 80, - }) - } - ing.Status.LoadBalancer.Ingress = []networkingv1.IngressLoadBalancerIngress{ - { - Hostname: dnsName, - Ports: ports, - }, - } - if apiequality.Semantic.DeepEqual(oldStatus, ing.Status) { + if apiequality.Semantic.DeepEqual(oldStatus, &ing.Status) { return svcsChanged, nil } + + const prefix = "Updating Ingress status" + if count == 0 { + logger.Infof("%s. No Pods are advertising VIPService yet", prefix) + } else { + logger.Infof("%s. %d Pod(s) advertising VIPService", prefix, count) + } + if err := r.Status().Update(ctx, ing); err != nil { return false, fmt.Errorf("failed to update Ingress status: %w", err) } @@ -726,6 +741,30 @@ func (a *HAIngressReconciler) maybeUpdateAdvertiseServicesConfig(ctx context.Con return nil } +func (a *HAIngressReconciler) numberPodsAdvertising(ctx context.Context, pgName string, serviceName tailcfg.ServiceName) (int, error) { + // Get all state Secrets for this ProxyGroup. 
+ secrets := &corev1.SecretList{} + if err := a.List(ctx, secrets, client.InNamespace(a.tsNamespace), client.MatchingLabels(pgSecretLabels(pgName, "state"))); err != nil { + return 0, fmt.Errorf("failed to list ProxyGroup %q state Secrets: %w", pgName, err) + } + + var count int + for _, secret := range secrets.Items { + prefs, ok, err := getDevicePrefs(&secret) + if err != nil { + return 0, fmt.Errorf("error getting node metadata: %w", err) + } + if !ok { + continue + } + if slices.Contains(prefs.AdvertiseServices, serviceName.String()) { + count++ + } + } + + return count, nil +} + // OwnerRef is an owner reference that uniquely identifies a Tailscale // Kubernetes operator instance. type OwnerRef struct { diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 2f675337e..0e90ec980 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -461,6 +461,31 @@ func TestIngressPGReconciler_HTTPEndpoint(t *testing.T) { t.Fatal(err) } + // Status will be empty until the VIPService shows up in prefs. + if !reflect.DeepEqual(ing.Status.LoadBalancer.Ingress, []networkingv1.IngressLoadBalancerIngress(nil)) { + t.Errorf("incorrect Ingress status: got %v, want empty", + ing.Status.LoadBalancer.Ingress) + } + + // Add the VIPService to prefs to have the Ingress recognised as ready. + mustCreate(t, fc, &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pg-0", + Namespace: "operator-ns", + Labels: pgSecretLabels("test-pg", "state"), + }, + Data: map[string][]byte{ + "_current-profile": []byte("profile-foo"), + "profile-foo": []byte(`{"AdvertiseServices":["svc:my-svc"],"Config":{"NodeID":"node-foo"}}`), + }, + }) + + // Reconcile and re-fetch Ingress. 
+ expectReconciled(t, ingPGR, "default", "test-ingress") + if err := fc.Get(context.Background(), client.ObjectKeyFromObject(ing), ing); err != nil { + t.Fatal(err) + } + wantStatus := []networkingv1.IngressPortStatus{ {Port: 443, Protocol: "TCP"}, {Port: 80, Protocol: "TCP"}, diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index 1dcd130fb..ff2a959bd 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -347,6 +347,7 @@ func runReconcilers(opts reconcilerOpts) { For(&networkingv1.Ingress{}). Named("ingress-pg-reconciler"). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(serviceHandlerForIngressPG(mgr.GetClient(), startlog))). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(ingressesFromPGStateSecret(mgr.GetClient(), startlog))). Watches(&tsapi.ProxyGroup{}, ingressProxyGroupFilter). Complete(&HAIngressReconciler{ recorder: eventRecorder, @@ -978,8 +979,6 @@ func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { return nil } - // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we - // have ingress ProxyGroups. if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } @@ -1040,6 +1039,45 @@ func reconcileRequestsForPG(pg string, cl client.Client, ns string) []reconcile. 
return reqs } +func ingressesFromPGStateSecret(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { + return func(ctx context.Context, o client.Object) []reconcile.Request { + secret, ok := o.(*corev1.Secret) + if !ok { + logger.Infof("[unexpected] Secret handler triggered for an object that is not a Secret") + return nil + } + if secret.ObjectMeta.Labels[LabelManaged] != "true" { + return nil + } + if secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { + return nil + } + if secret.ObjectMeta.Labels[labelSecretType] != "state" { + return nil + } + pgName, ok := secret.ObjectMeta.Labels[LabelParentName] + if !ok { + return nil + } + + ingList := &networkingv1.IngressList{} + if err := cl.List(ctx, ingList, client.MatchingFields{indexIngressProxyGroup: pgName}); err != nil { + logger.Infof("error listing Ingresses, skipping a reconcile for event on Secret %s: %v", secret.Name, err) + return nil + } + reqs := make([]reconcile.Request, 0) + for _, ing := range ingList.Items { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Namespace: ing.Namespace, + Name: ing.Name, + }, + }) + } + return reqs + } +} + // egressSvcsFromEgressProxyGroup is an event handler for egress ProxyGroups. It returns reconcile requests for all // user-created ExternalName Services that should be exposed on this ProxyGroup. 
func egressSvcsFromEgressProxyGroup(cl client.Client, logger *zap.SugaredLogger) handler.MapFunc { diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index 463d29249..c961c0471 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -645,7 +645,7 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr return nil, fmt.Errorf("unexpected secret %s was labelled as owned by the ProxyGroup %s: %w", secret.Name, pg.Name, err) } - id, dnsName, ok, err := getNodeMetadata(ctx, &secret) + prefs, ok, err := getDevicePrefs(&secret) if err != nil { return nil, err } @@ -656,8 +656,8 @@ func (r *ProxyGroupReconciler) getNodeMetadata(ctx context.Context, pg *tsapi.Pr nm := nodeMetadata{ ordinal: ordinal, stateSecret: &secret, - tsID: id, - dnsName: dnsName, + tsID: prefs.Config.NodeID, + dnsName: prefs.Config.UserProfile.LoginName, } pod := &corev1.Pod{} if err := r.Get(ctx, client.ObjectKey{Namespace: r.tsNamespace, Name: secret.Name}, pod); err != nil && !apierrors.IsNotFound(err) { diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 40bbaec17..8c17c7b6b 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -318,9 +318,9 @@ func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { } } -func pgSecretLabels(pgName, typ string) map[string]string { +func pgSecretLabels(pgName, secretType string) map[string]string { return pgLabels(pgName, map[string]string{ - labelSecretType: typ, // "config" or "state". + labelSecretType: secretType, // "config" or "state". 
}) } diff --git a/cmd/k8s-operator/tsrecorder.go b/cmd/k8s-operator/tsrecorder.go index 44ce731fe..e9e6b2c6c 100644 --- a/cmd/k8s-operator/tsrecorder.go +++ b/cmd/k8s-operator/tsrecorder.go @@ -230,7 +230,7 @@ func (r *RecorderReconciler) maybeProvision(ctx context.Context, tsr *tsapi.Reco func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Recorder) (bool, error) { logger := r.logger(tsr.Name) - id, _, ok, err := r.getNodeMetadata(ctx, tsr.Name) + prefs, ok, err := r.getDevicePrefs(ctx, tsr.Name) if err != nil { return false, err } @@ -243,6 +243,7 @@ func (r *RecorderReconciler) maybeCleanup(ctx context.Context, tsr *tsapi.Record return true, nil } + id := string(prefs.Config.NodeID) logger.Debugf("deleting device %s from control", string(id)) if err := r.tsClient.DeleteDevice(ctx, string(id)); err != nil { errResp := &tailscale.ErrResponse{} @@ -327,34 +328,33 @@ func (r *RecorderReconciler) getStateSecret(ctx context.Context, tsrName string) return secret, nil } -func (r *RecorderReconciler) getNodeMetadata(ctx context.Context, tsrName string) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { +func (r *RecorderReconciler) getDevicePrefs(ctx context.Context, tsrName string) (prefs prefs, ok bool, err error) { secret, err := r.getStateSecret(ctx, tsrName) if err != nil || secret == nil { - return "", "", false, err + return prefs, false, err } - return getNodeMetadata(ctx, secret) + return getDevicePrefs(secret) } -// getNodeMetadata returns 'ok == true' iff the node ID is found. The dnsName +// getDevicePrefs returns 'ok == true' iff the node ID is found. The dnsName // is expected to always be non-empty if the node ID is, but not required. -func getNodeMetadata(ctx context.Context, secret *corev1.Secret) (id tailcfg.StableNodeID, dnsName string, ok bool, err error) { +func getDevicePrefs(secret *corev1.Secret) (prefs prefs, ok bool, err error) { // TODO(tomhjp): Should maybe use ipn to parse the following info instead. 
currentProfile, ok := secret.Data[currentProfileKey] if !ok { - return "", "", false, nil + return prefs, false, nil } profileBytes, ok := secret.Data[string(currentProfile)] if !ok { - return "", "", false, nil + return prefs, false, nil } - var profile profile - if err := json.Unmarshal(profileBytes, &profile); err != nil { - return "", "", false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err) + if err := json.Unmarshal(profileBytes, &prefs); err != nil { + return prefs, false, fmt.Errorf("failed to extract node profile info from state Secret %s: %w", secret.Name, err) } - ok = profile.Config.NodeID != "" - return tailcfg.StableNodeID(profile.Config.NodeID), profile.Config.UserProfile.LoginName, ok, nil + ok = prefs.Config.NodeID != "" + return prefs, ok, nil } func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) (d tsapi.RecorderTailnetDevice, ok bool, err error) { @@ -367,14 +367,14 @@ func (r *RecorderReconciler) getDeviceInfo(ctx context.Context, tsrName string) } func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret) (d tsapi.RecorderTailnetDevice, ok bool, err error) { - nodeID, dnsName, ok, err := getNodeMetadata(ctx, secret) + prefs, ok, err := getDevicePrefs(secret) if !ok || err != nil { return tsapi.RecorderTailnetDevice{}, false, err } // TODO(tomhjp): The profile info doesn't include addresses, which is why we // need the API. Should we instead update the profile to include addresses? 
- device, err := tsClient.Device(ctx, string(nodeID), nil) + device, err := tsClient.Device(ctx, string(prefs.Config.NodeID), nil) if err != nil { return tsapi.RecorderTailnetDevice{}, false, fmt.Errorf("failed to get device info from API: %w", err) } @@ -383,20 +383,25 @@ func getDeviceInfo(ctx context.Context, tsClient tsClient, secret *corev1.Secret Hostname: device.Hostname, TailnetIPs: device.Addresses, } - if dnsName != "" { + if dnsName := prefs.Config.UserProfile.LoginName; dnsName != "" { d.URL = fmt.Sprintf("https://%s", dnsName) } return d, true, nil } -type profile struct { +// [prefs] is a subset of the ipn.Prefs struct used for extracting information +// from the state Secret of Tailscale devices. +type prefs struct { Config struct { - NodeID string `json:"NodeID"` + NodeID tailcfg.StableNodeID `json:"NodeID"` UserProfile struct { + // LoginName is the MagicDNS name of the device, e.g. foo.tail-scale.ts.net. LoginName string `json:"LoginName"` } `json:"UserProfile"` } `json:"Config"` + + AdvertiseServices []string `json:"AdvertiseServices"` } func markedForDeletion(obj metav1.Object) bool { From f50d3b22db19f34e233063050581a89694e10622 Mon Sep 17 00:00:00 2001 From: Irbe Krumina Date: Wed, 19 Mar 2025 12:49:31 +0000 Subject: [PATCH 84/87] cmd/k8s-operator: configure proxies for HA Ingress to run in cert share mode (#15308) cmd/k8s-operator: configure HA Ingress replicas to share certs Creates TLS certs Secret and RBAC that allows HA Ingress replicas to read/write to the Secret. Configures HA Ingress replicas to run in read-only mode. 
Updates tailscale/corp#24795 Signed-off-by: Irbe Krumina --- .../deploy/chart/templates/operator-rbac.yaml | 2 +- .../deploy/manifests/operator.yaml | 1 + cmd/k8s-operator/dnsrecords_test.go | 9 +- cmd/k8s-operator/egress-pod-readiness.go | 6 +- cmd/k8s-operator/egress-pod-readiness_test.go | 6 +- cmd/k8s-operator/egress-services.go | 12 +- cmd/k8s-operator/ingress-for-pg.go | 161 +++++++++++++++++- cmd/k8s-operator/ingress-for-pg_test.go | 19 +++ cmd/k8s-operator/metrics_resources.go | 3 +- cmd/k8s-operator/operator.go | 28 +-- cmd/k8s-operator/operator_test.go | 8 +- cmd/k8s-operator/proxygroup_specs.go | 21 ++- cmd/k8s-operator/proxygroup_test.go | 2 +- cmd/k8s-operator/sts.go | 4 +- cmd/k8s-operator/sts_test.go | 21 +-- cmd/k8s-operator/svc.go | 8 +- cmd/k8s-operator/testutils_test.go | 9 +- ipn/store/kubestore/store_kube.go | 2 +- 18 files changed, 255 insertions(+), 67 deletions(-) diff --git a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml index 7056ef42f..5bf50617e 100644 --- a/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml +++ b/cmd/k8s-operator/deploy/chart/templates/operator-rbac.yaml @@ -75,7 +75,7 @@ rules: verbs: ["get", "list", "watch", "create", "update", "deletecollection"] - apiGroups: ["rbac.authorization.k8s.io"] resources: ["roles", "rolebindings"] - verbs: ["get", "create", "patch", "update", "list", "watch"] + verbs: ["get", "create", "patch", "update", "list", "watch", "deletecollection"] - apiGroups: ["monitoring.coreos.com"] resources: ["servicemonitors"] verbs: ["get", "list", "update", "create", "delete"] diff --git a/cmd/k8s-operator/deploy/manifests/operator.yaml b/cmd/k8s-operator/deploy/manifests/operator.yaml index e966ef559..9ee3b441a 100644 --- a/cmd/k8s-operator/deploy/manifests/operator.yaml +++ b/cmd/k8s-operator/deploy/manifests/operator.yaml @@ -4898,6 +4898,7 @@ rules: - update - list - watch + - deletecollection - apiGroups: - 
monitoring.coreos.com resources: diff --git a/cmd/k8s-operator/dnsrecords_test.go b/cmd/k8s-operator/dnsrecords_test.go index 389461b85..4e73e6c9e 100644 --- a/cmd/k8s-operator/dnsrecords_test.go +++ b/cmd/k8s-operator/dnsrecords_test.go @@ -22,6 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" operatorutils "tailscale.com/k8s-operator" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tstest" "tailscale.com/types/ptr" ) @@ -163,10 +164,10 @@ func headlessSvcForParent(o client.Object, typ string) *corev1.Service { Name: o.GetName(), Namespace: "tailscale", Labels: map[string]string{ - LabelManaged: "true", - LabelParentName: o.GetName(), - LabelParentNamespace: o.GetNamespace(), - LabelParentType: typ, + kubetypes.LabelManaged: "true", + LabelParentName: o.GetName(), + LabelParentNamespace: o.GetNamespace(), + LabelParentType: typ, }, }, Spec: corev1.ServiceSpec{ diff --git a/cmd/k8s-operator/egress-pod-readiness.go b/cmd/k8s-operator/egress-pod-readiness.go index a6c57bf9d..05cf1aa1a 100644 --- a/cmd/k8s-operator/egress-pod-readiness.go +++ b/cmd/k8s-operator/egress-pod-readiness.go @@ -112,9 +112,9 @@ func (er *egressPodsReconciler) Reconcile(ctx context.Context, req reconcile.Req } // Get all ClusterIP Services for all egress targets exposed to cluster via this ProxyGroup. 
lbls := map[string]string{ - LabelManaged: "true", - labelProxyGroup: proxyGroupName, - labelSvcType: typeEgress, + kubetypes.LabelManaged: "true", + labelProxyGroup: proxyGroupName, + labelSvcType: typeEgress, } svcs := &corev1.ServiceList{} if err := er.List(ctx, svcs, client.InNamespace(er.tsNamespace), client.MatchingLabels(lbls)); err != nil { diff --git a/cmd/k8s-operator/egress-pod-readiness_test.go b/cmd/k8s-operator/egress-pod-readiness_test.go index 5e6fa2bb4..3c35d9043 100644 --- a/cmd/k8s-operator/egress-pod-readiness_test.go +++ b/cmd/k8s-operator/egress-pod-readiness_test.go @@ -450,9 +450,9 @@ func newSvc(name string, port int32) (*corev1.Service, string) { Namespace: "operator-ns", Name: name, Labels: map[string]string{ - LabelManaged: "true", - labelProxyGroup: "dev", - labelSvcType: typeEgress, + kubetypes.LabelManaged: "true", + labelProxyGroup: "dev", + labelSvcType: typeEgress, }, }, Spec: corev1.ServiceSpec{}, diff --git a/cmd/k8s-operator/egress-services.go b/cmd/k8s-operator/egress-services.go index e997e5884..7103205ac 100644 --- a/cmd/k8s-operator/egress-services.go +++ b/cmd/k8s-operator/egress-services.go @@ -680,12 +680,12 @@ func egressSvcsConfigs(ctx context.Context, cl client.Client, proxyGroupName, ts // should probably validate and truncate (?) the names is they are too long. 
func egressSvcChildResourceLabels(svc *corev1.Service) map[string]string { return map[string]string{ - LabelManaged: "true", - LabelParentType: "svc", - LabelParentName: svc.Name, - LabelParentNamespace: svc.Namespace, - labelProxyGroup: svc.Annotations[AnnotationProxyGroup], - labelSvcType: typeEgress, + kubetypes.LabelManaged: "true", + LabelParentType: "svc", + LabelParentName: svc.Name, + LabelParentNamespace: svc.Namespace, + labelProxyGroup: svc.Annotations[AnnotationProxyGroup], + labelSvcType: typeEgress, } } diff --git a/cmd/k8s-operator/ingress-for-pg.go b/cmd/k8s-operator/ingress-for-pg.go index fe85509ad..dc74a86a5 100644 --- a/cmd/k8s-operator/ingress-for-pg.go +++ b/cmd/k8s-operator/ingress-for-pg.go @@ -22,6 +22,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -240,8 +241,12 @@ func (r *HAIngressReconciler) maybeProvision(ctx context.Context, hostname strin r.recorder.Event(ing, corev1.EventTypeWarning, "InvalidVIPService", msg) return false, nil } + // 3. Ensure that TLS Secret and RBAC exists + if err := r.ensureCertResources(ctx, pgName, dnsName); err != nil { + return false, fmt.Errorf("error ensuring cert resources: %w", err) + } - // 3. Ensure that the serve config for the ProxyGroup contains the VIPService. + // 4. Ensure that the serve config for the ProxyGroup contains the VIPService. 
cm, cfg, err := r.proxyGroupServeConfig(ctx, pgName) if err != nil { return false, fmt.Errorf("error getting Ingress serve config: %w", err) @@ -426,8 +431,15 @@ func (r *HAIngressReconciler) maybeCleanupProxyGroup(ctx context.Context, proxyG if err = r.maybeUpdateAdvertiseServicesConfig(ctx, proxyGroupName, vipServiceName, false, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - delete(cfg.Services, vipServiceName) - serveConfigChanged = true + _, ok := cfg.Services[vipServiceName] + if ok { + logger.Infof("Removing VIPService %q from serve config", vipServiceName) + delete(cfg.Services, vipServiceName) + serveConfigChanged = true + } + if err := r.cleanupCertResources(ctx, proxyGroupName, vipServiceName); err != nil { + return false, fmt.Errorf("failed to clean up cert resources: %w", err) + } } } @@ -488,16 +500,22 @@ func (r *HAIngressReconciler) maybeCleanup(ctx context.Context, hostname string, if err != nil { return false, fmt.Errorf("error deleting VIPService: %w", err) } + + // 3. Clean up any cluster resources + if err := r.cleanupCertResources(ctx, pg, serviceName); err != nil { + return false, fmt.Errorf("failed to clean up cert resources: %w", err) + } + if cfg == nil || cfg.Services == nil { // user probably deleted the ProxyGroup return svcChanged, nil } - // 3. Unadvertise the VIPService in tailscaled config. + // 4. Unadvertise the VIPService in tailscaled config. if err = r.maybeUpdateAdvertiseServicesConfig(ctx, pg, serviceName, false, logger); err != nil { return false, fmt.Errorf("failed to update tailscaled config services: %w", err) } - // 4. Remove the VIPService from the serve config for the ProxyGroup. + // 5. Remove the VIPService from the serve config for the ProxyGroup. 
logger.Infof("Removing VIPService %q from serve config for ProxyGroup %q", hostname, pg) delete(cfg.Services, serviceName) cfgBytes, err := json.Marshal(cfg) @@ -816,6 +834,49 @@ func (r *HAIngressReconciler) ownerRefsComment(svc *tailscale.VIPService) (strin return string(json), nil } +// ensureCertResources ensures that the TLS Secret for an HA Ingress and RBAC +// resources that allow proxies to manage the Secret are created. +// Note that Tailscale VIPService name validation matches Kubernetes +// resource name validation, so we can be certain that the VIPService name +// (domain) is a valid Kubernetes resource name. +// https://github.com/tailscale/tailscale/blob/8b1e7f646ee4730ad06c9b70c13e7861b964949b/util/dnsname/dnsname.go#L99 +// https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-subdomain-names +func (r *HAIngressReconciler) ensureCertResources(ctx context.Context, pgName, domain string) error { + secret := certSecret(pgName, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, secret, nil); err != nil { + return fmt.Errorf("failed to create or update Secret %s: %w", secret.Name, err) + } + role := certSecretRole(pgName, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, role, nil); err != nil { + return fmt.Errorf("failed to create or update Role %s: %w", role.Name, err) + } + rb := certSecretRoleBinding(pgName, r.tsNamespace, domain) + if _, err := createOrUpdate(ctx, r.Client, r.tsNamespace, rb, nil); err != nil { + return fmt.Errorf("failed to create or update RoleBinding %s: %w", rb.Name, err) + } + return nil +} + +// cleanupCertResources ensures that the TLS Secret and associated RBAC +// resources that allow proxies to read/write to the Secret are deleted. 
+func (r *HAIngressReconciler) cleanupCertResources(ctx context.Context, pgName string, name tailcfg.ServiceName) error { + domainName, err := r.dnsNameForService(ctx, tailcfg.ServiceName(name)) + if err != nil { + return fmt.Errorf("error getting DNS name for VIPService %s: %w", name, err) + } + labels := certResourceLabels(pgName, domainName) + if err := r.DeleteAllOf(ctx, &rbacv1.RoleBinding{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting RoleBinding for domain name %s: %w", domainName, err) + } + if err := r.DeleteAllOf(ctx, &rbacv1.Role{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Role for domain name %s: %w", domainName, err) + } + if err := r.DeleteAllOf(ctx, &corev1.Secret{}, client.InNamespace(r.tsNamespace), client.MatchingLabels(labels)); err != nil { + return fmt.Errorf("error deleting Secret for domain name %s: %w", domainName, err) + } + return nil +} + // parseComment returns VIPService comment or nil if none found or not matching the expected format. func parseComment(vipSvc *tailscale.VIPService) (*comment, error) { if vipSvc.Comment == "" { @@ -836,3 +897,93 @@ func parseComment(vipSvc *tailscale.VIPService) (*comment, error) { func requeueInterval() time.Duration { return time.Duration(rand.N(5)+5) * time.Minute } + +// certSecretRole creates a Role that will allow proxies to manage the TLS +// Secret for the given domain. Domain must be a valid Kubernetes resource name. 
+func certSecretRole(pgName, namespace, domain string) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: namespace, + Labels: certResourceLabels(pgName, domain), + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + ResourceNames: []string{domain}, + Verbs: []string{ + "get", + "list", + "patch", + "update", + }, + }, + }, + } +} + +// certSecretRoleBinding creates a RoleBinding for Role that will allow proxies +// to manage the TLS Secret for the given domain. Domain must be a valid +// Kubernetes resource name. +func certSecretRoleBinding(pgName, namespace, domain string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: namespace, + Labels: certResourceLabels(pgName, domain), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: pgName, + Namespace: namespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "Role", + Name: domain, + }, + } +} + +// certSecret creates a Secret that will store the TLS certificate and private +// key for the given domain. Domain must be a valid Kubernetes resource name. +func certSecret(pgName, namespace, domain string) *corev1.Secret { + labels := certResourceLabels(pgName, domain) + labels[kubetypes.LabelSecretType] = "certs" + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: domain, + Namespace: namespace, + Labels: labels, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: nil, + corev1.TLSPrivateKeyKey: nil, + }, + Type: corev1.SecretTypeTLS, + } +} + +func certResourceLabels(pgName, domain string) map[string]string { + return map[string]string{ + kubetypes.LabelManaged: "true", + "tailscale.com/proxy-group": pgName, + "tailscale.com/domain": domain, + } +} + +// dnsNameForService returns the DNS name for the given VIPService name. 
+func (r *HAIngressReconciler) dnsNameForService(ctx context.Context, svc tailcfg.ServiceName) (string, error) { + s := svc.WithoutPrefix() + tcd, err := r.tailnetCertDomain(ctx) + if err != nil { + return "", fmt.Errorf("error determining DNS name base: %w", err) + } + return s + "." + tcd, nil +} diff --git a/cmd/k8s-operator/ingress-for-pg_test.go b/cmd/k8s-operator/ingress-for-pg_test.go index 0e90ec980..5716c0bbf 100644 --- a/cmd/k8s-operator/ingress-for-pg_test.go +++ b/cmd/k8s-operator/ingress-for-pg_test.go @@ -20,6 +20,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -70,6 +71,11 @@ func TestIngressPGReconciler(t *testing.T) { verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + // Verify cert resources were created for the first Ingress + expectEqual(t, fc, certSecret("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-svc.ts.net")) + mustUpdate(t, fc, "default", "test-ingress", func(ing *networkingv1.Ingress) { ing.Annotations["tailscale.com/tags"] = "tag:custom,tag:test" }) @@ -124,6 +130,11 @@ func TestIngressPGReconciler(t *testing.T) { verifyServeConfig(t, fc, "svc:my-other-svc", false) verifyVIPService(t, ft, "svc:my-other-svc", []string{"443"}) + // Verify cert resources were created for the second Ingress + expectEqual(t, fc, certSecret("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRole("test-pg", "operator-ns", "my-other-svc.ts.net")) + expectEqual(t, fc, certSecretRoleBinding("test-pg", "operator-ns", "my-other-svc.ts.net")) + // Verify first Ingress is still working verifyServeConfig(t, fc, "svc:my-svc", false) 
verifyVIPService(t, ft, "svc:my-svc", []string{"443"}) @@ -160,6 +171,9 @@ func TestIngressPGReconciler(t *testing.T) { } verifyTailscaledConfig(t, fc, []string{"svc:my-svc"}) + expectMissing[corev1.Secret](t, fc, "operator-ns", "my-other-svc.ts.net") + expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-other-svc.ts.net") + expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-other-svc.ts.net") // Delete the first Ingress and verify cleanup if err := fc.Delete(context.Background(), ing); err != nil { @@ -186,6 +200,11 @@ func TestIngressPGReconciler(t *testing.T) { t.Error("serve config not cleaned up") } verifyTailscaledConfig(t, fc, nil) + + // Add verification that cert resources were cleaned up + expectMissing[corev1.Secret](t, fc, "operator-ns", "my-svc.ts.net") + expectMissing[rbacv1.Role](t, fc, "operator-ns", "my-svc.ts.net") + expectMissing[rbacv1.RoleBinding](t, fc, "operator-ns", "my-svc.ts.net") } func TestIngressPGReconciler_UpdateIngressHostname(t *testing.T) { diff --git a/cmd/k8s-operator/metrics_resources.go b/cmd/k8s-operator/metrics_resources.go index 8516cf8be..0579e3466 100644 --- a/cmd/k8s-operator/metrics_resources.go +++ b/cmd/k8s-operator/metrics_resources.go @@ -19,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" ) const ( @@ -222,7 +223,7 @@ func metricsResourceName(stsName string) string { // proxy. 
func metricsResourceLabels(opts *metricsOpts) map[string]string { lbls := map[string]string{ - LabelManaged: "true", + kubetypes.LabelManaged: "true", labelMetricsTarget: opts.proxyStsName, labelPromProxyType: opts.proxyType, labelPromProxyParentName: opts.proxyLabels[LabelParentName], diff --git a/cmd/k8s-operator/operator.go b/cmd/k8s-operator/operator.go index ff2a959bd..b0f0b3576 100644 --- a/cmd/k8s-operator/operator.go +++ b/cmd/k8s-operator/operator.go @@ -637,8 +637,8 @@ func enqueueAllIngressEgressProxySvcsInNS(ns string, cl client.Client, logger *z // Get all headless Services for proxies configured using Service. svcProxyLabels := map[string]string{ - LabelManaged: "true", - LabelParentType: "svc", + kubetypes.LabelManaged: "true", + LabelParentType: "svc", } svcHeadlessSvcList := &corev1.ServiceList{} if err := cl.List(ctx, svcHeadlessSvcList, client.InNamespace(ns), client.MatchingLabels(svcProxyLabels)); err != nil { @@ -651,8 +651,8 @@ func enqueueAllIngressEgressProxySvcsInNS(ns string, cl client.Client, logger *z // Get all headless Services for proxies configured using Ingress. ingProxyLabels := map[string]string{ - LabelManaged: "true", - LabelParentType: "ingress", + kubetypes.LabelManaged: "true", + LabelParentType: "ingress", } ingHeadlessSvcList := &corev1.ServiceList{} if err := cl.List(ctx, ingHeadlessSvcList, client.InNamespace(ns), client.MatchingLabels(ingProxyLabels)); err != nil { @@ -719,7 +719,7 @@ func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, c func isManagedResource(o client.Object) bool { ls := o.GetLabels() - return ls[LabelManaged] == "true" + return ls[kubetypes.LabelManaged] == "true" } func isManagedByType(o client.Object, typ string) bool { @@ -956,7 +956,7 @@ func egressPodsHandler(_ context.Context, o client.Object) []reconcile.Request { // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. 
func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + if v, ok := o.GetLabels()[kubetypes.LabelManaged]; !ok || v != "true" { return nil } // TODO(irbekrm): for now this is good enough as all ProxyGroups are egress. Add a type check once we @@ -976,13 +976,13 @@ func egressEpsFromPGPods(cl client.Client, ns string) handler.MapFunc { // returns reconciler requests for all egress EndpointSlices for that ProxyGroup. func egressEpsFromPGStateSecrets(cl client.Client, ns string) handler.MapFunc { return func(_ context.Context, o client.Object) []reconcile.Request { - if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + if v, ok := o.GetLabels()[kubetypes.LabelManaged]; !ok || v != "true" { return nil } if parentType := o.GetLabels()[LabelParentType]; parentType != "proxygroup" { return nil } - if secretType := o.GetLabels()[labelSecretType]; secretType != "state" { + if secretType := o.GetLabels()[kubetypes.LabelSecretType]; secretType != "state" { return nil } pg, ok := o.GetLabels()[LabelParentName] @@ -999,7 +999,7 @@ func egressSvcFromEps(_ context.Context, o client.Object) []reconcile.Request { if typ := o.GetLabels()[labelSvcType]; typ != typeEgress { return nil } - if v, ok := o.GetLabels()[LabelManaged]; !ok || v != "true" { + if v, ok := o.GetLabels()[kubetypes.LabelManaged]; !ok || v != "true" { return nil } svcName, ok := o.GetLabels()[LabelParentName] @@ -1046,13 +1046,13 @@ func ingressesFromPGStateSecret(cl client.Client, logger *zap.SugaredLogger) han logger.Infof("[unexpected] ProxyGroup handler triggered for an object that is not a ProxyGroup") return nil } - if secret.ObjectMeta.Labels[LabelManaged] != "true" { + if secret.ObjectMeta.Labels[kubetypes.LabelManaged] != "true" { return nil } if secret.ObjectMeta.Labels[LabelParentType] != "proxygroup" { return nil } - if 
secret.ObjectMeta.Labels[labelSecretType] != "state" { + if secret.ObjectMeta.Labels[kubetypes.LabelSecretType] != "state" { return nil } pgName, ok := secret.ObjectMeta.Labels[LabelParentName] @@ -1183,9 +1183,9 @@ func podsFromEgressEps(cl client.Client, logger *zap.SugaredLogger, ns string) h return nil } podLabels := map[string]string{ - LabelManaged: "true", - LabelParentType: "proxygroup", - LabelParentName: eps.Labels[labelProxyGroup], + kubetypes.LabelManaged: "true", + LabelParentType: "proxygroup", + LabelParentName: eps.Labels[labelProxyGroup], } podList := &corev1.PodList{} if err := cl.List(ctx, podList, client.InNamespace(ns), diff --git a/cmd/k8s-operator/operator_test.go b/cmd/k8s-operator/operator_test.go index 73c795bb3..175003ac7 100644 --- a/cmd/k8s-operator/operator_test.go +++ b/cmd/k8s-operator/operator_test.go @@ -1387,10 +1387,10 @@ func Test_serviceHandlerForIngress(t *testing.T) { Name: "headless-1", Namespace: "tailscale", Labels: map[string]string{ - LabelManaged: "true", - LabelParentName: "ing-1", - LabelParentNamespace: "ns-1", - LabelParentType: "ingress", + kubetypes.LabelManaged: "true", + LabelParentName: "ing-1", + LabelParentNamespace: "ns-1", + LabelParentType: "ingress", }, }, } diff --git a/cmd/k8s-operator/proxygroup_specs.go b/cmd/k8s-operator/proxygroup_specs.go index 8c17c7b6b..16deea278 100644 --- a/cmd/k8s-operator/proxygroup_specs.go +++ b/cmd/k8s-operator/proxygroup_specs.go @@ -178,7 +178,15 @@ func pgStatefulSet(pg *tsapi.ProxyGroup, namespace, image, tsFirewallMode string corev1.EnvVar{ Name: "TS_SERVE_CONFIG", Value: fmt.Sprintf("/etc/proxies/%s", serveConfigKey), - }) + }, + corev1.EnvVar{ + // Run proxies in cert share mode to + // ensure that only one TLS cert is + // issued for an HA Ingress. + Name: "TS_EXPERIMENTAL_CERT_SHARE", + Value: "true", + }, + ) } return append(c.Env, envs...) 
}() @@ -225,6 +233,13 @@ func pgRole(pg *tsapi.ProxyGroup, namespace string) *rbacv1.Role { OwnerReferences: pgOwnerReference(pg), }, Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{ + "list", + }, + }, { APIGroups: []string{""}, Resources: []string{"secrets"}, @@ -320,7 +335,7 @@ func pgIngressCM(pg *tsapi.ProxyGroup, namespace string) *corev1.ConfigMap { func pgSecretLabels(pgName, secretType string) map[string]string { return pgLabels(pgName, map[string]string{ - labelSecretType: secretType, // "config" or "state". + kubetypes.LabelSecretType: secretType, // "config" or "state". }) } @@ -330,7 +345,7 @@ func pgLabels(pgName string, customLabels map[string]string) map[string]string { l[k] = v } - l[LabelManaged] = "true" + l[kubetypes.LabelManaged] = "true" l[LabelParentType] = "proxygroup" l[LabelParentName] = pgName diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 6829b3929..5b690a485 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -247,7 +247,6 @@ func TestProxyGroup(t *testing.T) { // The fake client does not clean up objects whose owner has been // deleted, so we can't test for the owned resources getting deleted. }) - } func TestProxyGroupTypes(t *testing.T) { @@ -417,6 +416,7 @@ func TestProxyGroupTypes(t *testing.T) { } verifyEnvVar(t, sts, "TS_INTERNAL_APP", kubetypes.AppProxyGroupIngress) verifyEnvVar(t, sts, "TS_SERVE_CONFIG", "/etc/proxies/serve-config.json") + verifyEnvVar(t, sts, "TS_EXPERIMENTAL_CERT_SHARE", "true") // Verify ConfigMap volume mount cmName := fmt.Sprintf("%s-ingress-config", pg.Name) diff --git a/cmd/k8s-operator/sts.go b/cmd/k8s-operator/sts.go index 0bc9d6fb9..6327a073b 100644 --- a/cmd/k8s-operator/sts.go +++ b/cmd/k8s-operator/sts.go @@ -44,11 +44,9 @@ const ( // Labels that the operator sets on StatefulSets and Pods. 
If you add a // new label here, do also add it to tailscaleManagedLabels var to // ensure that it does not get overwritten by ProxyClass configuration. - LabelManaged = "tailscale.com/managed" LabelParentType = "tailscale.com/parent-resource-type" LabelParentName = "tailscale.com/parent-resource" LabelParentNamespace = "tailscale.com/parent-resource-ns" - labelSecretType = "tailscale.com/secret-type" // "config" or "state". // LabelProxyClass can be set by users on tailscale Ingresses and Services that define cluster ingress or // cluster egress, to specify that configuration in this ProxyClass should be applied to resources created for @@ -108,7 +106,7 @@ const ( var ( // tailscaleManagedLabels are label keys that tailscale operator sets on StatefulSets and Pods. - tailscaleManagedLabels = []string{LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} + tailscaleManagedLabels = []string{kubetypes.LabelManaged, LabelParentType, LabelParentName, LabelParentNamespace, "app"} // tailscaleManagedAnnotations are annotation keys that tailscale operator sets on StatefulSets and Pods. tailscaleManagedAnnotations = []string{podAnnotationLastSetClusterIP, podAnnotationLastSetTailnetTargetIP, podAnnotationLastSetTailnetTargetFQDN, podAnnotationLastSetConfigFileHash} ) diff --git a/cmd/k8s-operator/sts_test.go b/cmd/k8s-operator/sts_test.go index 3d0cecc04..35c512c8c 100644 --- a/cmd/k8s-operator/sts_test.go +++ b/cmd/k8s-operator/sts_test.go @@ -21,6 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/yaml" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/types/ptr" ) @@ -156,8 +157,8 @@ func Test_applyProxyClassToStatefulSet(t *testing.T) { // Set a couple additional fields so we can test that we don't // mistakenly override those. 
labels := map[string]string{ - LabelManaged: "true", - LabelParentName: "foo", + kubetypes.LabelManaged: "true", + LabelParentName: "foo", } annots := map[string]string{ podAnnotationLastSetClusterIP: "1.2.3.4", @@ -303,28 +304,28 @@ func Test_mergeStatefulSetLabelsOrAnnots(t *testing.T) { }{ { name: "no custom labels specified and none present in current labels, return current labels", - current: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, - want: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { name: "no custom labels specified, but some present in current labels, return tailscale managed labels only from the current labels", - current: map[string]string{"foo": "bar", "something.io/foo": "bar", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, - want: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{"foo": "bar", "something.io/foo": "bar", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { name: "custom labels specified, current labels only contain tailscale managed labels, return a union of both", - current: map[string]string{LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: 
map[string]string{kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, custom: map[string]string{"foo": "bar", "something.io/foo": "bar"}, - want: map[string]string{"foo": "bar", "something.io/foo": "bar", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{"foo": "bar", "something.io/foo": "bar", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { name: "custom labels specified, current labels contain tailscale managed labels and custom labels, some of which re not present in the new custom labels, return a union of managed labels and the desired custom labels", - current: map[string]string{"foo": "bar", "bar": "baz", "app": "1234", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + current: map[string]string{"foo": "bar", "bar": "baz", "app": "1234", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, custom: map[string]string{"foo": "bar", "something.io/foo": "bar"}, - want: map[string]string{"foo": "bar", "something.io/foo": "bar", "app": "1234", LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, + want: map[string]string{"foo": "bar", "something.io/foo": "bar", "app": "1234", kubetypes.LabelManaged: "true", LabelParentName: "foo", LabelParentType: "svc", LabelParentNamespace: "foo"}, managed: tailscaleManagedLabels, }, { diff --git a/cmd/k8s-operator/svc.go b/cmd/k8s-operator/svc.go index 70c810b25..d6a6f440f 100644 --- a/cmd/k8s-operator/svc.go +++ b/cmd/k8s-operator/svc.go @@ -84,10 +84,10 @@ func childResourceLabels(name, ns, typ string) map[string]string { // proxying. Instead, we have to do our own filtering and tracking with // labels. 
return map[string]string{ - LabelManaged: "true", - LabelParentName: name, - LabelParentNamespace: ns, - LabelParentType: typ, + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, } } diff --git a/cmd/k8s-operator/testutils_test.go b/cmd/k8s-operator/testutils_test.go index 6b1a4f85b..f47f96e44 100644 --- a/cmd/k8s-operator/testutils_test.go +++ b/cmd/k8s-operator/testutils_test.go @@ -32,6 +32,7 @@ import ( "tailscale.com/ipn" "tailscale.com/ipn/ipnstate" tsapi "tailscale.com/k8s-operator/apis/v1alpha1" + "tailscale.com/kube/kubetypes" "tailscale.com/tailcfg" "tailscale.com/types/ptr" "tailscale.com/util/mak" @@ -563,10 +564,10 @@ func expectedSecret(t *testing.T, cl client.Client, opts configOpts) *corev1.Sec func findGenName(t *testing.T, client client.Client, ns, name, typ string) (full, noSuffix string) { t.Helper() labels := map[string]string{ - LabelManaged: "true", - LabelParentName: name, - LabelParentNamespace: ns, - LabelParentType: typ, + kubetypes.LabelManaged: "true", + LabelParentName: name, + LabelParentNamespace: ns, + LabelParentType: typ, } s, err := getSingleObject[corev1.Secret](context.Background(), client, "operator-ns", labels) if err != nil { diff --git a/ipn/store/kubestore/store_kube.go b/ipn/store/kubestore/store_kube.go index 79e66d357..ed37f06c2 100644 --- a/ipn/store/kubestore/store_kube.go +++ b/ipn/store/kubestore/store_kube.go @@ -283,7 +283,7 @@ func (s *Store) updateSecret(data map[string][]byte, secretName string) (err err } } if err := s.client.JSONPatchResource(ctx, secretName, kubeclient.TypeSecrets, m); err != nil { - return fmt.Errorf("error patching Secret %s: %w", s.secretName, err) + return fmt.Errorf("error patching Secret %s: %w", secretName, err) } return nil } From 25d5f78c6efef25eec0f6f78a651bafb61a3833c Mon Sep 17 00:00:00 2001 From: Jonathan Nobels Date: Wed, 19 Mar 2025 09:21:37 -0400 Subject: [PATCH 85/87] net/dns: expose a function for recompiling the 
DNS configuration (#15346) updates tailscale/corp#27145 We require a means to trigger a recompilation of the DNS configuration to pick up new nameservers for platforms where we blend the interface nameservers from the OS into our DNS config. Notably, on Darwin, the only API we have at our disposal will, in rare instances, return a transient error when querying the interface nameservers on a link change if they have not been set when we get the AF_ROUTE messages for the link update. There's a corresponding change in corp for Darwin clients, to track the interface nameservers during NEPathMonitor events, and call this when the nameservers change. This will also fix the slightly more obscure bug of changing nameservers while tailscaled is running. That change can now be reflected in magicDNS without having to stop the client. Signed-off-by: Jonathan Nobels --- net/dns/manager.go | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/net/dns/manager.go b/net/dns/manager.go index ebf91811a..1e9eb7fe7 100644 --- a/net/dns/manager.go +++ b/net/dns/manager.go @@ -35,6 +35,9 @@ import ( var ( errFullQueue = errors.New("request queue full") + // ErrNoDNSConfig is returned by RecompileDNSConfig when the Manager + // has no existing DNS configuration. + ErrNoDNSConfig = errors.New("no DNS configuration") ) // maxActiveQueries returns the maximal number of DNS requests that can @@ -91,21 +94,18 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, } // Rate limit our attempts to correct our DNS configuration. + // This is done on incoming queries, we don't want to spam it. limiter := rate.NewLimiter(1.0/5.0, 1) // This will recompile the DNS config, which in turn will requery the system // DNS settings. The recovery func should triggered only when we are missing // upstream nameservers and require them to forward a query. 
m.resolver.SetMissingUpstreamRecovery(func() { - m.mu.Lock() - defer m.mu.Unlock() - if m.config == nil { - return - } - if limiter.Allow() { - m.logf("DNS resolution failed due to missing upstream nameservers. Recompiling DNS configuration.") - m.setLocked(*m.config) + m.logf("resolution failed due to missing upstream nameservers. Recompiling DNS configuration.") + if err := m.RecompileDNSConfig(); err != nil { + m.logf("config recompilation failed: %v", err) + } } }) @@ -117,6 +117,26 @@ func NewManager(logf logger.Logf, oscfg OSConfigurator, health *health.Tracker, // Resolver returns the Manager's DNS Resolver. func (m *Manager) Resolver() *resolver.Resolver { return m.resolver } +// RecompileDNSConfig sets the DNS config to the current value, which has +// the side effect of re-querying the OS's interface nameservers. This should be used +// on platforms where the interface nameservers can change. Darwin, for example, +// where the nameservers aren't always available when we process a major interface +// change event, or platforms where the nameservers may change while tunnel is up. +// +// This should be called if it is determined that [OSConfigurator.GetBaseConfig] may +// give a better or different result than when [Manager.Set] was last called. The +// logic for making that determination is up to the caller. +// +// It returns [ErrNoDNSConfig] if the [Manager] has no existing DNS configuration. 
+func (m *Manager) RecompileDNSConfig() error { + m.mu.Lock() + defer m.mu.Unlock() + if m.config == nil { + return ErrNoDNSConfig + } + return m.setLocked(*m.config) +} + func (m *Manager) Set(cfg Config) error { m.mu.Lock() defer m.mu.Unlock() From 8d84720edb471c639e0f6de3addf3490e78b7748 Mon Sep 17 00:00:00 2001 From: Tom Proctor Date: Wed, 19 Mar 2025 06:49:36 -0700 Subject: [PATCH 86/87] cmd/k8s-operator: update ProxyGroup config Secrets instead of patch (#15353) There was a flaky failure case where renaming a TLS hostname for an ingress might leave the old hostname dangling in tailscaled config. This happened when the proxygroup reconciler loop had an outdated resource version of the config Secret in its cache after the ingress-pg-reconciler loop had very recently written it to delete the old hostname. As the proxygroup reconciler then did a patch, there was no conflict and it reinstated the old hostname. This commit updates the patch to an update operation so that if the resource version is out of date it will fail with an optimistic lock error. It also checks for equality to reduce the likelihood that we make the update API call in the first place, because most of the time the proxygroup reconciler is not even making an update to the Secret in the case that the hostname has changed. 
Updates tailscale/corp#24795 Change-Id: Ie23a97440063976c9a8475d24ab18253e1f89050 Signed-off-by: Tom Proctor --- cmd/k8s-operator/proxygroup.go | 16 +++++++++------- cmd/k8s-operator/proxygroup_test.go | 16 +++------------- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/cmd/k8s-operator/proxygroup.go b/cmd/k8s-operator/proxygroup.go index c961c0471..112e5e2b0 100644 --- a/cmd/k8s-operator/proxygroup.go +++ b/cmd/k8s-operator/proxygroup.go @@ -461,7 +461,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p var existingCfgSecret *corev1.Secret // unmodified copy of secret if err := r.Get(ctx, client.ObjectKeyFromObject(cfgSecret), cfgSecret); err == nil { - logger.Debugf("secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) + logger.Debugf("Secret %s/%s already exists", cfgSecret.GetNamespace(), cfgSecret.GetName()) existingCfgSecret = cfgSecret.DeepCopy() } else if !apierrors.IsNotFound(err) { return "", err @@ -469,7 +469,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p var authKey string if existingCfgSecret == nil { - logger.Debugf("creating authkey for new ProxyGroup proxy") + logger.Debugf("Creating authkey for new ProxyGroup proxy") tags := pg.Spec.Tags.Stringify() if len(tags) == 0 { tags = r.defaultTags @@ -490,7 +490,7 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p if err != nil { return "", fmt.Errorf("error marshalling tailscaled config: %w", err) } - mak.Set(&cfgSecret.StringData, tsoperator.TailscaledConfigFileName(cap), string(cfgJSON)) + mak.Set(&cfgSecret.Data, tsoperator.TailscaledConfigFileName(cap), cfgJSON) } // The config sha256 sum is a value for a hash annotation used to trigger @@ -520,12 +520,14 @@ func (r *ProxyGroupReconciler) ensureConfigSecretsCreated(ctx context.Context, p } if existingCfgSecret != nil { - logger.Debugf("patching the existing ProxyGroup config Secret %s", cfgSecret.Name) - 
if err := r.Patch(ctx, cfgSecret, client.MergeFrom(existingCfgSecret)); err != nil { - return "", err + if !apiequality.Semantic.DeepEqual(existingCfgSecret, cfgSecret) { + logger.Debugf("Updating the existing ProxyGroup config Secret %s", cfgSecret.Name) + if err := r.Update(ctx, cfgSecret); err != nil { + return "", err + } } } else { - logger.Debugf("creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) + logger.Debugf("Creating a new config Secret %s for the ProxyGroup", cfgSecret.Name) if err := r.Create(ctx, cfgSecret); err != nil { return "", err } diff --git a/cmd/k8s-operator/proxygroup_test.go b/cmd/k8s-operator/proxygroup_test.go index 5b690a485..1f1a39ab0 100644 --- a/cmd/k8s-operator/proxygroup_test.go +++ b/cmd/k8s-operator/proxygroup_test.go @@ -475,8 +475,6 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Name: pgConfigSecretName(pgName, 0), Namespace: tsNamespace, }, - // Write directly to Data because the fake client doesn't copy the write-only - // StringData field across to Data for us. Data: map[string][]byte{ tsoperator.TailscaledConfigFileName(106): existingConfigBytes, }, @@ -514,10 +512,10 @@ func TestIngressAdvertiseServicesConfigPreserved(t *testing.T) { Namespace: tsNamespace, ResourceVersion: "2", }, - StringData: map[string]string{ - tsoperator.TailscaledConfigFileName(106): string(expectedConfigBytes), + Data: map[string][]byte{ + tsoperator.TailscaledConfigFileName(106): expectedConfigBytes, }, - }, omitSecretData) + }) } func verifyProxyGroupCounts(t *testing.T, r *ProxyGroupReconciler, wantIngress, wantEgress int) { @@ -620,11 +618,3 @@ func addNodeIDToStateSecrets(t *testing.T, fc client.WithWatch, pg *tsapi.ProxyG }) } } - -// The operator mostly writes to StringData and reads from Data, but the fake -// client doesn't copy StringData across to Data on write. When comparing actual -// vs expected Secrets, use this function to only check what the operator writes -// to StringData. 
-func omitSecretData(secret *corev1.Secret) { - secret.Data = nil -} From 3a2c92f08eac8cd8f50356ff288e40a28636ee42 Mon Sep 17 00:00:00 2001 From: klyubin Date: Wed, 19 Mar 2025 10:46:32 -0600 Subject: [PATCH 87/87] web: support Host 100.100.100.100:80 in tailscaled web server This makes the web server running inside tailscaled on 100.100.100.100:80 support requests with `Host: 100.100.100.100:80` and its IPv6 equivalent. Prior to this commit, the web server replied to such requests with a redirect to the node's Tailscale IP:5252. Fixes https://github.com/tailscale/tailscale/issues/14415 Signed-off-by: Alex Klyubin --- client/web/web.go | 3 ++- client/web/web_test.go | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/client/web/web.go b/client/web/web.go index e9810ccd0..6eccdadcf 100644 --- a/client/web/web.go +++ b/client/web/web.go @@ -335,7 +335,8 @@ func (s *Server) requireTailscaleIP(w http.ResponseWriter, r *http.Request) (han ipv6ServiceHost = "[" + tsaddr.TailscaleServiceIPv6String + "]" ) // allow requests on quad-100 (or ipv6 equivalent) - if r.Host == ipv4ServiceHost || r.Host == ipv6ServiceHost { + host := strings.TrimSuffix(r.Host, ":80") + if host == ipv4ServiceHost || host == ipv6ServiceHost { return false } diff --git a/client/web/web_test.go b/client/web/web_test.go index 291356260..334b403a6 100644 --- a/client/web/web_test.go +++ b/client/web/web_test.go @@ -1177,6 +1177,16 @@ func TestRequireTailscaleIP(t *testing.T) { target: "http://[fd7a:115c:a1e0::53]/", wantHandled: false, }, + { + name: "quad-100:80", + target: "http://100.100.100.100:80/", + wantHandled: false, + }, + { + name: "ipv6-service-addr:80", + target: "http://[fd7a:115c:a1e0::53]:80/", + wantHandled: false, + }, } for _, tt := range tests {