all-kube: create Tailscale Service for HA kube-apiserver ProxyGroup (#16572)

Adds a new reconciler for ProxyGroups of type kube-apiserver that will
provision a Tailscale Service for each replica to advertise. Adds two
new condition types to the ProxyGroup, TailscaleServiceValid and
TailscaleServiceConfigured, to post updates on the state of that
reconciler in a way that's consistent with the service-pg reconciler.
The created Tailscale Service name is configurable via a new ProxyGroup
field spec.kubeAPIServer.ServiceName, which expects a string of the
form "svc:<dns-label>".

Lots of supporting changes were needed to implement this in a way that's
consistent with other operator workflows, including:

* Pulled containerboot's ensureServicesUnadvertised and certManager into
  kube/ libraries to be shared with k8s-proxy. Use those in k8s-proxy to
  aid Service cert sharing between replicas and graceful Service shutdown.
* For certManager, add an initial wait to the cert loop so that it waits
  until the domain appears in the device's netmap, avoiding a guaranteed
  error on the first issue attempt when the proxy starts up quickly.
* Made several methods in ingress-for-pg.go and svc-for-pg.go into standalone
  functions so they can be shared with the new reconciler.
* Added a Resource struct to the owner refs stored in Tailscale Service
  annotations to be able to distinguish between Ingress- and ProxyGroup-
  based Services that need cleaning up in the Tailscale API.
* Added a ListVIPServices method to the internal tailscale client to aid in
  cleaning up orphaned Services.
* Support for reading config from a kube Secret, and partial support for
  config reloading, to avoid having to force Pod restarts when config
  changes (see the usage sketch after this list).
* Fixed up the zap logger so it's possible to set debug log level.
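
As a usage sketch (not code from the commit), wiring the new loader from
inside the cmd/k8s-proxy tree might look roughly like the helper below;
watchProxyConfig is a hypothetical function, the internal/config package is
only importable from within cmd/k8s-proxy, and error/shutdown handling is
trimmed:

package main

import (
	"context"

	"go.uber.org/zap"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"tailscale.com/cmd/k8s-proxy/internal/config"
	"tailscale.com/kube/k8s-proxy/conf"
)

// watchProxyConfig starts the config watcher for either a file path or a
// "kube:<namespace>/<name>" Secret reference, and returns the first config it
// produces along with the channel that later reloads arrive on.
func watchProxyConfig(ctx context.Context, logger *zap.SugaredLogger, restConfig *rest.Config, path string) (*conf.Config, <-chan *conf.Config, error) {
	clientset, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		return nil, nil, err
	}
	cfgChan := make(chan *conf.Config)
	loader := config.NewConfigLoader(logger, clientset.CoreV1(), cfgChan)
	go func() {
		// Blocks until ctx is cancelled or the watcher hits a fatal error.
		if err := loader.WatchConfig(ctx, path); err != nil {
			logger.Errorf("config watcher stopped: %v", err)
		}
	}()
	select {
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	case cfg := <-cfgChan:
		return cfg, cfgChan, nil
	}
}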

Updates #13358

Change-Id: Ia9607441157dd91fb9b6ecbc318eecbef446e116
Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
Commit: f421907c38 (parent 5adde9e3f3)
Author: Tom Proctor
Committed: 2025-07-21 11:03:21 +01:00 (via GitHub)
39 changed files with 2551 additions and 397 deletions


@@ -0,0 +1,264 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !plan9
// Package config provides watchers for the various supported ways to load a
// config file for k8s-proxy; currently file or Kubernetes Secret.
package config
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"tailscale.com/kube/k8s-proxy/conf"
"tailscale.com/kube/kubetypes"
"tailscale.com/types/ptr"
"tailscale.com/util/testenv"
)
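// configLoader loads k8s-proxy config from a file or a Kubernetes Secret and
// sends each newly parsed config to cfgChan. Identical config payloads are
// deduplicated so consumers only see real changes.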
type configLoader struct {
logger *zap.SugaredLogger
client clientcorev1.CoreV1Interface
cfgChan chan<- *conf.Config
previous []byte
once sync.Once // For use in tests. To close cfgIgnored.
cfgIgnored chan struct{} // For use in tests.
}
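// NewConfigLoader returns a configLoader that delivers parsed configs to
// cfgChan, using client to read config when watching a Kubernetes Secret.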
func NewConfigLoader(logger *zap.SugaredLogger, client clientcorev1.CoreV1Interface, cfgChan chan<- *conf.Config) *configLoader {
return &configLoader{
logger: logger,
client: client,
cfgChan: cfgChan,
}
}
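// WatchConfig watches the config source referenced by path until ctx is
// cancelled. A path of the form "kube:<namespace>/<name>" refers to a
// Kubernetes Secret; any other value is treated as a config file on disk.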
func (l *configLoader) WatchConfig(ctx context.Context, path string) error {
secretNamespacedName, isKubeSecret := strings.CutPrefix(path, "kube:")
if isKubeSecret {
secretNamespace, secretName, ok := strings.Cut(secretNamespacedName, string(types.Separator))
if !ok {
return fmt.Errorf("invalid Kubernetes Secret reference %q, expected format <namespace>/<name>", path)
}
if err := l.watchConfigSecretChanges(ctx, secretNamespace, secretName); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("error watching config Secret %q: %w", secretNamespacedName, err)
}
return nil
}
if err := l.watchConfigFileChanges(ctx, path); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("error watching config file %q: %w", path, err)
}
return nil
}
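// reloadConfig parses raw and sends the resulting config to cfgChan. It is a
// no-op if the bytes are identical to the previously loaded config.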
func (l *configLoader) reloadConfig(ctx context.Context, raw []byte) error {
if bytes.Equal(raw, l.previous) {
if l.cfgIgnored != nil && testenv.InTest() {
l.once.Do(func() {
close(l.cfgIgnored)
})
}
return nil
}
cfg, err := conf.Load(raw)
if err != nil {
return fmt.Errorf("error loading config: %w", err)
}
select {
case <-ctx.Done():
return ctx.Err()
case l.cfgChan <- &cfg:
}
l.previous = raw
return nil
}
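// watchConfigFileChanges watches the config file at path for changes using
// fsnotify, falling back to polling every 5 seconds if an fsnotify watcher
// cannot be created.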
func (l *configLoader) watchConfigFileChanges(ctx context.Context, path string) error {
var (
tickChan <-chan time.Time
eventChan <-chan fsnotify.Event
errChan <-chan error
)
if w, err := fsnotify.NewWatcher(); err != nil {
// Creating a new fsnotify watcher would fail for example if inotify was not able to create a new file descriptor.
// See https://github.com/tailscale/tailscale/issues/15081
l.logger.Infof("Failed to create fsnotify watcher on config file %q; watching for changes on 5s timer: %v", path, err)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
tickChan = ticker.C
} else {
dir := filepath.Dir(path)
file := filepath.Base(path)
l.logger.Infof("Watching directory %q for changes to config file %q", dir, file)
defer w.Close()
if err := w.Add(dir); err != nil {
return fmt.Errorf("failed to add fsnotify watch: %w", err)
}
eventChan = w.Events
errChan = w.Errors
}
// Read the initial config file, but after the watcher is already set up to
// avoid an unlucky race condition if the config file is edited in between.
b, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("error reading config file %q: %w", path, err)
}
if err := l.reloadConfig(ctx, b); err != nil {
return fmt.Errorf("error loading initial config file %q: %w", path, err)
}
for {
select {
case <-ctx.Done():
return ctx.Err()
case err, ok := <-errChan:
if !ok {
// Watcher was closed.
return nil
}
return fmt.Errorf("watcher error: %w", err)
case <-tickChan:
case ev, ok := <-eventChan:
if !ok {
// Watcher was closed.
return nil
}
if ev.Name != path || ev.Op&fsnotify.Write == 0 {
// Ignore irrelevant events.
continue
}
}
b, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("error reading config file: %w", err)
}
// Writers such as os.WriteFile may truncate the file before writing
// new contents, so it's possible to read an empty file if we read before
// the write has completed.
if len(b) == 0 {
continue
}
if err := l.reloadConfig(ctx, b); err != nil {
return fmt.Errorf("error reloading config file %q: %v", path, err)
}
}
}
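// watchConfigSecretChanges watches the named Secret and reloads config on
// each Added/Modified event, re-establishing the watch whenever the API
// server closes the watch channel.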
func (l *configLoader) watchConfigSecretChanges(ctx context.Context, secretNamespace, secretName string) error {
secrets := l.client.Secrets(secretNamespace)
w, err := secrets.Watch(ctx, metav1.ListOptions{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
// Re-watch regularly to avoid relying on long-lived connections.
// See https://github.com/kubernetes-client/javascript/issues/596#issuecomment-786419380
TimeoutSeconds: ptr.To(int64(600)),
FieldSelector: fmt.Sprintf("metadata.name=%s", secretName),
Watch: true,
})
if err != nil {
return fmt.Errorf("failed to watch config Secret %q: %w", secretName, err)
}
defer func() {
// May not be the original watcher by the time we exit.
if w != nil {
w.Stop()
}
}()
// Get the initial config Secret now we've got the watcher set up.
secret, err := secrets.Get(ctx, secretName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get config Secret %q: %w", secretName, err)
}
if err := l.configFromSecret(ctx, secret); err != nil {
return fmt.Errorf("error loading initial config: %w", err)
}
l.logger.Infof("Watching config Secret %q for changes", secretName)
for {
var secret *corev1.Secret
select {
case <-ctx.Done():
return ctx.Err()
case ev, ok := <-w.ResultChan():
if !ok {
w.Stop()
w, err = secrets.Watch(ctx, metav1.ListOptions{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
TimeoutSeconds: ptr.To(int64(600)),
FieldSelector: fmt.Sprintf("metadata.name=%s", secretName),
Watch: true,
})
if err != nil {
return fmt.Errorf("failed to re-watch config Secret %q: %w", secretName, err)
}
continue
}
switch ev.Type {
case watch.Added, watch.Modified:
// New config available to load.
var ok bool
secret, ok = ev.Object.(*corev1.Secret)
if !ok {
return fmt.Errorf("unexpected object type %T in watch event for config Secret %q", ev.Object, secretName)
}
if secret == nil || secret.Data == nil {
continue
}
if err := l.configFromSecret(ctx, secret); err != nil {
return fmt.Errorf("error reloading config Secret %q: %v", secret.Name, err)
}
case watch.Error:
return fmt.Errorf("error watching config Secret %q: %v", secretName, ev.Object)
default:
// Ignore, no action required.
continue
}
}
}
}
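// configFromSecret loads config from the Secret's expected config key,
// erroring if the key is missing or empty.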
func (l *configLoader) configFromSecret(ctx context.Context, s *corev1.Secret) error {
b := s.Data[kubetypes.KubeAPIServerConfigFile]
if len(b) == 0 {
return fmt.Errorf("config Secret %q does not contain expected config in key %q", s.Name, kubetypes.KubeAPIServerConfigFile)
}
if err := l.reloadConfig(ctx, b); err != nil {
return err
}
return nil
}


@@ -0,0 +1,245 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package config
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
ktesting "k8s.io/client-go/testing"
"tailscale.com/kube/k8s-proxy/conf"
"tailscale.com/kube/kubetypes"
"tailscale.com/types/ptr"
)
func TestWatchConfig(t *testing.T) {
type phase struct {
config string
cancel bool
expectedConf *conf.ConfigV1Alpha1
expectedErr string
}
// Same set of behaviour tests for each config source.
for _, env := range []string{"file", "kube"} {
t.Run(env, func(t *testing.T) {
t.Parallel()
for _, tc := range []struct {
name string
initialConfig string
phases []phase
}{
{
name: "no_config",
phases: []phase{{
expectedErr: "error loading initial config",
}},
},
{
name: "valid_config",
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
phases: []phase{{
expectedConf: &conf.ConfigV1Alpha1{
AuthKey: ptr.To("abc123"),
},
}},
},
{
name: "can_cancel",
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
phases: []phase{
{
expectedConf: &conf.ConfigV1Alpha1{
AuthKey: ptr.To("abc123"),
},
},
{
cancel: true,
},
},
},
{
name: "can_reload",
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
phases: []phase{
{
expectedConf: &conf.ConfigV1Alpha1{
AuthKey: ptr.To("abc123"),
},
},
{
config: `{"version": "v1alpha1", "authKey": "def456"}`,
expectedConf: &conf.ConfigV1Alpha1{
AuthKey: ptr.To("def456"),
},
},
},
},
{
name: "ignores_events_with_no_changes",
initialConfig: `{"version": "v1alpha1", "authKey": "abc123"}`,
phases: []phase{
{
expectedConf: &conf.ConfigV1Alpha1{
AuthKey: ptr.To("abc123"),
},
},
{
config: `{"version": "v1alpha1", "authKey": "abc123"}`,
},
},
},
} {
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
root := t.TempDir()
cl := fake.NewClientset()
var cfgPath string
var writeFile func(*testing.T, string)
if env == "file" {
cfgPath = filepath.Join(root, kubetypes.KubeAPIServerConfigFile)
writeFile = func(t *testing.T, content string) {
if err := os.WriteFile(cfgPath, []byte(content), 0o644); err != nil {
t.Fatalf("error writing config file %q: %v", cfgPath, err)
}
}
} else {
cfgPath = "kube:default/config-secret"
writeFile = func(t *testing.T, content string) {
s := secretFrom(content)
mustCreateOrUpdate(t, cl, s)
}
}
configChan := make(chan *conf.Config)
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
l.cfgIgnored = make(chan struct{})
errs := make(chan error)
ctx, cancel := context.WithCancel(t.Context())
defer cancel()
writeFile(t, tc.initialConfig)
go func() {
errs <- l.WatchConfig(ctx, cfgPath)
}()
for i, p := range tc.phases {
if p.config != "" {
writeFile(t, p.config)
}
if p.cancel {
cancel()
}
select {
case cfg := <-configChan:
if diff := cmp.Diff(*p.expectedConf, cfg.Parsed); diff != "" {
t.Errorf("unexpected config (-want +got):\n%s", diff)
}
case err := <-errs:
if p.cancel {
if err != nil {
t.Fatalf("unexpected error after cancel: %v", err)
}
} else if p.expectedErr == "" {
t.Fatalf("unexpected error: %v", err)
} else if !strings.Contains(err.Error(), p.expectedErr) {
t.Fatalf("expected error to contain %q, got %q", p.expectedErr, err.Error())
}
case <-l.cfgIgnored:
if p.expectedConf != nil {
t.Fatalf("expected config to be reloaded, but got ignored signal")
}
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for expected event in phase: %d", i)
}
}
})
}
})
}
}
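// TestWatchConfigSecret_Rewatches verifies that the Secret watcher
// re-establishes its watch and keeps receiving config updates after the
// initial watch channel is closed.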
func TestWatchConfigSecret_Rewatches(t *testing.T) {
cl := fake.NewClientset()
var watchCount int
var watcher *watch.RaceFreeFakeWatcher
expected := []string{
`{"version": "v1alpha1", "authKey": "abc123"}`,
`{"version": "v1alpha1", "authKey": "def456"}`,
`{"version": "v1alpha1", "authKey": "ghi789"}`,
}
cl.PrependWatchReactor("secrets", func(action ktesting.Action) (handled bool, ret watch.Interface, err error) {
watcher = watch.NewRaceFreeFake()
watcher.Add(secretFrom(expected[watchCount]))
if action.GetVerb() == "watch" && action.GetResource().Resource == "secrets" {
watchCount++
}
return true, watcher, nil
})
configChan := make(chan *conf.Config)
l := NewConfigLoader(zap.Must(zap.NewDevelopment()).Sugar(), cl.CoreV1(), configChan)
mustCreateOrUpdate(t, cl, secretFrom(expected[0]))
errs := make(chan error)
go func() {
errs <- l.watchConfigSecretChanges(t.Context(), "default", "config-secret")
}()
for i := range 2 {
select {
case cfg := <-configChan:
if exp := expected[i]; cfg.Parsed.AuthKey == nil || !strings.Contains(exp, *cfg.Parsed.AuthKey) {
t.Fatalf("expected config to have authKey %q, got: %v", exp, cfg.Parsed.AuthKey)
}
if i == 0 {
watcher.Stop()
}
case err := <-errs:
t.Fatalf("unexpected error: %v", err)
case <-l.cfgIgnored:
t.Fatalf("expected config to be reloaded, but got ignored signal")
case <-time.After(5 * time.Second):
t.Fatalf("timed out waiting for expected event")
}
}
if watchCount != 2 {
t.Fatalf("expected 2 watch API calls, got %d", watchCount)
}
}
func secretFrom(content string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "config-secret",
},
Data: map[string][]byte{
kubetypes.KubeAPIServerConfigFile: []byte(content),
},
}
}
func mustCreateOrUpdate(t *testing.T, cl *fake.Clientset, s *corev1.Secret) {
t.Helper()
if _, err := cl.CoreV1().Secrets("default").Create(t.Context(), s, metav1.CreateOptions{}); err != nil {
if _, updateErr := cl.CoreV1().Secrets("default").Update(t.Context(), s, metav1.UpdateOptions{}); updateErr != nil {
t.Fatalf("error writing config Secret %q: %v", s.Name, updateErr)
}
}
}


@@ -14,6 +14,7 @@ import (
"fmt"
"os"
"os/signal"
"reflect"
"strings"
"syscall"
"time"
@@ -21,20 +22,37 @@ import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/utils/strings/slices"
"tailscale.com/client/local"
"tailscale.com/cmd/k8s-proxy/internal/config"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/ipn/store"
apiproxy "tailscale.com/k8s-operator/api-proxy"
"tailscale.com/kube/certs"
"tailscale.com/kube/k8s-proxy/conf"
klc "tailscale.com/kube/localclient"
"tailscale.com/kube/services"
"tailscale.com/kube/state"
"tailscale.com/tailcfg"
"tailscale.com/tsnet"
)
func main() {
logger := zap.Must(zap.NewProduction()).Sugar()
encoderCfg := zap.NewProductionEncoderConfig()
encoderCfg.EncodeTime = zapcore.RFC3339TimeEncoder
logger := zap.Must(zap.Config{
Level: zap.NewAtomicLevelAt(zap.DebugLevel),
Encoding: "json",
OutputPaths: []string{"stderr"},
ErrorOutputPaths: []string{"stderr"},
EncoderConfig: encoderCfg,
}.Build()).Sugar()
defer logger.Sync()
if err := run(logger); err != nil {
logger.Fatal(err.Error())
}
@@ -42,18 +60,58 @@ func main() {
func run(logger *zap.SugaredLogger) error {
var (
configFile = os.Getenv("TS_K8S_PROXY_CONFIG")
configPath = os.Getenv("TS_K8S_PROXY_CONFIG")
podUID = os.Getenv("POD_UID")
)
if configFile == "" {
if configPath == "" {
return errors.New("TS_K8S_PROXY_CONFIG unset")
}
// TODO(tomhjp): Support reloading config.
// TODO(tomhjp): Support reading config from a Secret.
cfg, err := conf.Load(configFile)
// serveCtx lives for the lifetime of the process; it only gets cancelled
// once the Tailscale Service has been drained.
serveCtx, serveCancel := context.WithCancel(context.Background())
defer serveCancel()
// ctx to cancel to start the shutdown process.
ctx, cancel := context.WithCancel(serveCtx)
defer cancel()
sigsChan := make(chan os.Signal, 1)
signal.Notify(sigsChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
select {
case <-ctx.Done():
case s := <-sigsChan:
logger.Infof("Received shutdown signal %s, exiting", s)
cancel()
}
}()
var group *errgroup.Group
group, ctx = errgroup.WithContext(ctx)
restConfig, err := getRestConfig(logger)
if err != nil {
return fmt.Errorf("error loading config file %q: %w", configFile, err)
return fmt.Errorf("error getting rest config: %w", err)
}
clientset, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return fmt.Errorf("error creating Kubernetes clientset: %w", err)
}
// Load and watch config.
cfgChan := make(chan *conf.Config)
cfgLoader := config.NewConfigLoader(logger, clientset.CoreV1(), cfgChan)
group.Go(func() error {
return cfgLoader.WatchConfig(ctx, configPath)
})
// Get initial config.
var cfg *conf.Config
select {
case <-ctx.Done():
return group.Wait()
case cfg = <-cfgChan:
}
if cfg.Parsed.LogLevel != nil {
@@ -82,6 +140,14 @@ func run(logger *zap.SugaredLogger) error {
hostinfo.SetApp(*cfg.Parsed.App)
}
// TODO(tomhjp): Pass this setting directly into the store instead of using
// environment variables.
if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true) {
os.Setenv("TS_CERT_SHARE_MODE", "rw")
} else {
os.Setenv("TS_CERT_SHARE_MODE", "ro")
}
st, err := getStateStore(cfg.Parsed.State, logger)
if err != nil {
return err
@@ -115,10 +181,6 @@ func run(logger *zap.SugaredLogger) error {
ts.Hostname = *cfg.Parsed.Hostname
}
// ctx to live for the lifetime of the process.
ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
defer cancel()
// Make sure we crash loop if Up doesn't complete in reasonable time.
upCtx, upCancel := context.WithTimeout(ctx, time.Minute)
defer upCancel()
@@ -126,9 +188,6 @@ func run(logger *zap.SugaredLogger) error {
return fmt.Errorf("error starting tailscale server: %w", err)
}
defer ts.Close()
group, groupCtx := errgroup.WithContext(ctx)
lc, err := ts.LocalClient()
if err != nil {
return fmt.Errorf("error getting local client: %w", err)
@@ -136,23 +195,13 @@ func run(logger *zap.SugaredLogger) error {
// Setup for updating state keys.
if podUID != "" {
w, err := lc.WatchIPNBus(groupCtx, ipn.NotifyInitialNetMap)
if err != nil {
return fmt.Errorf("error watching IPN bus: %w", err)
}
defer w.Close()
group.Go(func() error {
if err := state.KeepKeysUpdated(st, w.Next); err != nil && err != groupCtx.Err() {
return fmt.Errorf("error keeping state keys updated: %w", err)
}
return nil
return state.KeepKeysUpdated(ctx, st, klc.New(lc))
})
}
if cfg.Parsed.AcceptRoutes != nil {
_, err = lc.EditPrefs(groupCtx, &ipn.MaskedPrefs{
_, err = lc.EditPrefs(ctx, &ipn.MaskedPrefs{
RouteAllSet: true,
Prefs: ipn.Prefs{RouteAll: *cfg.Parsed.AcceptRoutes},
})
@@ -161,34 +210,97 @@ func run(logger *zap.SugaredLogger) error {
}
}
// Setup for the API server proxy.
restConfig, err := getRestConfig(logger)
if err != nil {
return fmt.Errorf("error getting rest config: %w", err)
// TODO(tomhjp): There seems to be a bug that on restart the device does
// not get reassigned its already-working Service IPs unless we clear and
// reset the serve config.
if err := lc.SetServeConfig(ctx, &ipn.ServeConfig{}); err != nil {
return fmt.Errorf("error clearing existing ServeConfig: %w", err)
}
authMode := true
if cfg.Parsed.KubeAPIServer != nil {
v, ok := cfg.Parsed.KubeAPIServer.AuthMode.Get()
if ok {
authMode = v
var cm *certs.CertManager
if shouldIssueCerts(cfg) {
logger.Infof("Will issue TLS certs for Tailscale Service")
cm = certs.NewCertManager(klc.New(lc), logger.Infof)
}
if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil {
return err
}
if cfg.Parsed.AdvertiseServices != nil {
if _, err := lc.EditPrefs(ctx, &ipn.MaskedPrefs{
AdvertiseServicesSet: true,
Prefs: ipn.Prefs{
AdvertiseServices: cfg.Parsed.AdvertiseServices,
},
}); err != nil {
return fmt.Errorf("error setting prefs AdvertiseServices: %w", err)
}
}
ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode)
// Setup for the API server proxy.
authMode := true
if cfg.Parsed.APIServerProxy != nil && cfg.Parsed.APIServerProxy.AuthMode.EqualBool(false) {
authMode = false
}
ap, err := apiproxy.NewAPIServerProxy(logger.Named("apiserver-proxy"), restConfig, ts, authMode, false)
if err != nil {
return fmt.Errorf("error creating api server proxy: %w", err)
}
// TODO(tomhjp): Work out whether we should use TS_CERT_SHARE_MODE or not,
// and possibly issue certs upfront here before serving.
group.Go(func() error {
if err := ap.Run(groupCtx); err != nil {
if err := ap.Run(serveCtx); err != nil {
return fmt.Errorf("error running API server proxy: %w", err)
}
return nil
})
return group.Wait()
for {
select {
case <-ctx.Done():
// Context cancelled, exit.
logger.Info("Context cancelled, exiting")
shutdownCtx, shutdownCancel := context.WithTimeout(serveCtx, 20*time.Second)
unadvertiseErr := services.EnsureServicesNotAdvertised(shutdownCtx, lc, logger.Infof)
shutdownCancel()
serveCancel()
return errors.Join(unadvertiseErr, group.Wait())
case cfg = <-cfgChan:
// Handle config reload.
// TODO(tomhjp): Make auth mode reloadable.
var prefs ipn.MaskedPrefs
cfgLogger := logger
currentPrefs, err := lc.GetPrefs(ctx)
if err != nil {
return fmt.Errorf("error getting current prefs: %w", err)
}
if !slices.Equal(currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices) {
cfgLogger = cfgLogger.With("AdvertiseServices", fmt.Sprintf("%v -> %v", currentPrefs.AdvertiseServices, cfg.Parsed.AdvertiseServices))
prefs.AdvertiseServicesSet = true
prefs.Prefs.AdvertiseServices = cfg.Parsed.AdvertiseServices
}
if cfg.Parsed.Hostname != nil && *cfg.Parsed.Hostname != currentPrefs.Hostname {
cfgLogger = cfgLogger.With("Hostname", fmt.Sprintf("%s -> %s", currentPrefs.Hostname, *cfg.Parsed.Hostname))
prefs.HostnameSet = true
prefs.Hostname = *cfg.Parsed.Hostname
}
if cfg.Parsed.AcceptRoutes != nil && *cfg.Parsed.AcceptRoutes != currentPrefs.RouteAll {
cfgLogger = cfgLogger.With("AcceptRoutes", fmt.Sprintf("%v -> %v", currentPrefs.RouteAll, *cfg.Parsed.AcceptRoutes))
prefs.RouteAllSet = true
prefs.Prefs.RouteAll = *cfg.Parsed.AcceptRoutes
}
if !prefs.IsEmpty() {
if _, err := lc.EditPrefs(ctx, &prefs); err != nil {
return fmt.Errorf("error editing prefs: %w", err)
}
}
if err := setServeConfig(ctx, lc, cm, apiServerProxyService(cfg)); err != nil {
return fmt.Errorf("error setting serve config: %w", err)
}
cfgLogger.Infof("Config reloaded")
}
}
}
func getStateStore(path *string, logger *zap.SugaredLogger) (ipn.StateStore, error) {
@@ -226,3 +338,79 @@ func getRestConfig(logger *zap.SugaredLogger) (*rest.Config, error) {
return restConfig, nil
}
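// apiServerProxyService returns the Tailscale Service name the proxy should
// advertise for the API server proxy, or "" if none is configured.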
func apiServerProxyService(cfg *conf.Config) tailcfg.ServiceName {
if cfg.Parsed.APIServerProxy != nil &&
cfg.Parsed.APIServerProxy.Enabled.EqualBool(true) &&
cfg.Parsed.APIServerProxy.ServiceName != nil &&
*cfg.Parsed.APIServerProxy.ServiceName != "" {
return tailcfg.ServiceName(*cfg.Parsed.APIServerProxy.ServiceName)
}
return ""
}
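// shouldIssueCerts reports whether this replica is configured to issue TLS
// certs for the Tailscale Service (as opposed to only reading shared certs).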
func shouldIssueCerts(cfg *conf.Config) bool {
return cfg.Parsed.APIServerProxy != nil &&
cfg.Parsed.APIServerProxy.IssueCerts.EqualBool(true)
}
// setServeConfig sets up the serve config so that it serves the passed-in
// Tailscale Service, and does nothing if it's already up to date.
func setServeConfig(ctx context.Context, lc *local.Client, cm *certs.CertManager, name tailcfg.ServiceName) error {
existingServeConfig, err := lc.GetServeConfig(ctx)
if err != nil {
return fmt.Errorf("error getting existing serve config: %w", err)
}
// Ensure serve config is cleared if no Tailscale Service.
if name == "" {
if reflect.DeepEqual(*existingServeConfig, ipn.ServeConfig{}) {
// Already up to date.
return nil
}
if cm != nil {
cm.EnsureCertLoops(ctx, &ipn.ServeConfig{})
}
return lc.SetServeConfig(ctx, &ipn.ServeConfig{})
}
status, err := lc.StatusWithoutPeers(ctx)
if err != nil {
return fmt.Errorf("error getting local client status: %w", err)
}
serviceHostPort := ipn.HostPort(fmt.Sprintf("%s.%s:443", name.WithoutPrefix(), status.CurrentTailnet.MagicDNSSuffix))
serveConfig := ipn.ServeConfig{
// Configure for the Service hostname.
Services: map[tailcfg.ServiceName]*ipn.ServiceConfig{
name: {
TCP: map[uint16]*ipn.TCPPortHandler{
443: {
HTTPS: true,
},
},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
serviceHostPort: {
Handlers: map[string]*ipn.HTTPHandler{
"/": {
Proxy: fmt.Sprintf("http://%s:80", strings.TrimSuffix(status.Self.DNSName, ".")),
},
},
},
},
},
},
}
if reflect.DeepEqual(*existingServeConfig, serveConfig) {
// Already up to date.
return nil
}
if cm != nil {
cm.EnsureCertLoops(ctx, &serveConfig)
}
return lc.SetServeConfig(ctx, &serveConfig)
}