Code review comments

Signed-off-by: Tom Proctor <tomhjp@users.noreply.github.com>
Tom Proctor 2024-07-17 14:21:31 +01:00
parent b8421e9996
commit 8462e3ae30
6 changed files with 20 additions and 131 deletions


@@ -19,10 +19,7 @@
"tailscale.com/tstest"
)
// TestIngress requires some setup not handled by this test:
// - Kubernetes cluster with tailscale operator installed
// - Current kubeconfig context set to connect to that cluster
// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env
// See [TestMain] for test requirements.
func TestIngress(t *testing.T) {
if tsClient == nil {
t.Skip("TestIngress requires credentials for a tailscale client")
@@ -93,6 +90,9 @@ func TestIngress(t *testing.T) {
var resp *http.Response
if err := tstest.WaitFor(time.Second*60, func() error {
// TODO(tomhjp): Get the tailnet DNS name from the associated secret instead.
// If we are not the first tailnet node with the requested name, we'll get
// a -N suffix.
resp, err = tsClient.HTTPClient.Get(fmt.Sprintf("http://%s-%s:80", svc.Namespace, svc.Name))
if err != nil {
return err

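As a rough illustration of the TODO above (not part of this commit): the proxy's state Secret records the device's actual tailnet FQDN, so the test could read it from there instead of deriving the name. The Secret name placeholder and the device_fqdn key below are assumptions about the operator's state Secret layout, shown with kubectl for brevity.

# Sketch only: read the MagicDNS name recorded for the proxy. The Secret name
# and the device_fqdn key are assumptions, not confirmed by this diff.
kubectl get secret -n tailscale "<proxy-state-secret>" \
  -o jsonpath='{.data.device_fqdn}' | base64 -d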

@@ -45,6 +45,14 @@
}
)
// This test suite is currently not run in CI.
// It requires some setup not handled by this code:
// - Kubernetes cluster with tailscale operator installed
// - Current kubeconfig context set to connect to that cluster (directly, no operator proxy)
// - Operator installed with --set apiServerProxyConfig.mode="true"
// - ACLs that define tag:e2e-test-proxy tag. TODO(tomhjp): Can maybe replace this prereq onwards with an API key
// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env
// - OAuth client must have acl write and device write for tag:e2e-test-proxy tag
func TestMain(m *testing.M) {
code, err := runTests(m)
if err != nil {

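For context, a minimal sketch of satisfying the prerequisites listed in the comment above, assuming the in-repo chart and the same Helm values used by the (now removed) scripts/kubetests/setup.sh further down; the tag:e2e-test-proxy ACL definition and the OAuth client scopes still have to be configured separately:

# Sketch only: install the operator from the in-repo chart with the API server
# proxy enabled.
helm upgrade --install operator ./cmd/k8s-operator/deploy/chart \
  --namespace tailscale --create-namespace \
  --set-string apiServerProxyConfig.mode="true" \
  --set oauth.clientId="<oauth client ID>" \
  --set oauth.clientSecret="<oauth-client-secret>" \
  --wait

# The tests read OAuth client credentials (with acl write and device write for
# tag:e2e-test-proxy, per the list above) from these env vars.
export TS_API_CLIENT_ID="<oauth client ID>"
export TS_API_CLIENT_SECRET="<oauth-client-secret>"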

@@ -9,6 +9,7 @@
"fmt"
"strings"
"testing"
"time"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -18,15 +19,10 @@
"sigs.k8s.io/controller-runtime/pkg/client/config"
"tailscale.com/client/tailscale"
"tailscale.com/tsnet"
"tailscale.com/tstest"
)
// TestProxy requires some setup not handled by this test:
// - Kubernetes cluster with tailscale operator installed
// - Current kubeconfig context set to connect to that cluster (directly, no operator proxy)
// - Operator installed with --set apiServerProxyConfig.mode="true"
// - ACLs that define tag:e2e-test-proxy tag. TODO(tomhjp): Can maybe replace this prereq onwards with an API key
// - OAuth client ID and secret in TS_API_CLIENT_ID and TS_API_CLIENT_SECRET env
// - OAuth client must have device write for tag:e2e-test-proxy tag
// See [TestMain] for test requirements.
func TestProxy(t *testing.T) {
if tsClient == nil {
t.Skip("TestProxy requires credentials for a tailscale client")
@@ -84,7 +80,11 @@ func TestProxy(t *testing.T) {
allowedSecret := corev1.Secret{
ObjectMeta: objectMeta("tailscale", "operator"),
}
if err := get(ctx, proxyCl, &allowedSecret); err != nil {
// Wait for up to a minute the first time we use the proxy, to give it time
// to provision the TLS certs.
if err := tstest.WaitFor(time.Second*60, func() error {
return get(ctx, proxyCl, &allowedSecret)
}); err != nil {
t.Fatal(err)
}


@@ -1,9 +0,0 @@
#!/usr/bin/env sh
set -eu
helm uninstall operator --namespace tailscale
kubectl delete -f ./cmd/k8s-operator/deploy/crds
helm uninstall ingress


@@ -1,71 +0,0 @@
#!/usr/bin/env bash
set -eu
# This script builds the latest operator and proxy images and manifests and
# installs them, along with the ingress-nginx chart, into the current kube
# cluster. Run it with KIND=<cluster-name> to upload images to a kind cluster;
# otherwise you must pass a REPO env var pointing at a public repository where
# the images will be pushed.
# This script is also run from ./scripts/kubetests/test_on_kind.sh
# Run it with:
# OAUTH_CLIENT_ID=<oauth client ID> \
# OAUTH_CLIENT_SECRET=<oauth-client-secret> \
# [REPO=<image registry>] \
# [KIND=<kind-cluster-name>] \
# ./scripts/kubetests/setup.sh
OAUTH_CLIENT_ID="$OAUTH_CLIENT_ID"
OAUTH_CLIENT_SECRET="$OAUTH_CLIENT_SECRET"
KIND=${KIND:-}
if [[ ! -z "$KIND" ]]; then
REPO="tailscale-for-kind"
fi
REPO="$REPO"
eval `./tool/go run ./cmd/mkversion`
args=(TAGS="${VERSION_SHORT}")
make kube-generate-all # ensure things are up to date
if [[ ! -z "$KIND" ]]; then
args+=(PLATFORM=local)
fi
make ${args[@]} REPO="${REPO}/proxy" publishdevimage
make ${args[@]} REPO="${REPO}/operator" publishdevoperator
if [[ ! -z "$KIND" ]]; then
kind load docker-image "${REPO}/operator:${VERSION_SHORT}" --name "${KIND}"
kind load docker-image "${REPO}/proxy:${VERSION_SHORT}" --name "${KIND}"
fi
kubectl apply -f ./cmd/k8s-operator/deploy/crds/
helm upgrade \
--install \
operator ./cmd/k8s-operator/deploy/chart \
--namespace tailscale \
--create-namespace \
--set operator.image.repo="${REPO}/operator" \
--set operator.image.tag="${VERSION_SHORT}" \
--set operator.image.pullPolicy="IfNotPresent" \
--set proxy.image.repo="${REPO}/proxy" \
--set proxy.image.tag="${VERSION_SHORT}" \
--set installCRDs=false \
--set-string apiServerProxyConfig.mode="true" \
--set oauth.clientId="${OAUTH_CLIENT_ID}" \
--set oauth.clientSecret="${OAUTH_CLIENT_SECRET}" \
--set operatorConfig.logging=debug \
--wait
# ingress-nginx is used in tests.
# Note that this command CANNOT be run with --wait as the Service will never
# become ready (load balancer cannot be provisioned on kind).
helm upgrade --install ingress ingress-nginx/ingress-nginx
# TODO: either wait for the ingress-controller Pod to become ready or do
# something else to wait for the parts we care about to be ready.
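One possible way to address that TODO, not something this commit adds, is to wait on the controller Deployment rather than the chart as a whole; the Deployment name below is an assumption based on the default ingress-nginx naming for a Helm release called "ingress".

# Sketch only: wait for the nginx ingress controller Deployment to become
# available instead of relying on `helm --wait`.
kubectl wait --for=condition=Available --timeout=120s \
  deployment/ingress-ingress-nginx-controller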


@@ -1,39 +0,0 @@
#!/usr/bin/env sh
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
set -eu
# This script creates a kind cluster, sets up test dependencies, and runs e2e
# tests. It builds the latest operator and proxy images as well as manifests
# from this repo. The operator and proxy images are uploaded to the local
# container registry (i.e. docker) and the kind cluster.
#
# Run it with:
# OAUTH_CLIENT_ID=<oauth client ID> \
# OAUTH_CLIENT_SECRET=<oauth-client-secret> \
# [K8S_VERSION=<k8s version>] \
# [CLUSTER_NAME=<cluster_name>] \
# ./scripts/kubetests/test_on_kind.sh
K8S_VERSION="${K8S_VERSION:=1.30}"
CLUSTER_NAME="${CLUSTER_NAME:=ts-e2e}"
# Kind recommends using the exact image SHAs for a given kind build
case $K8S_VERSION in
1.30*) kind_image=kindest/node:v1.30.0@sha256:047357ac0cfea04663786a612ba1eaba9702bef25227a794b52890dd8bcd692e ;;
1.29*) kind_image=kindest/node:v1.29.4@sha256:3abb816a5b1061fb15c6e9e60856ec40d56b7b52bcea5f5f1350bc6e2320b6f8 ;;
1.28*) kind_image=kindest/node:v1.28.9@sha256:dca54bc6a6079dd34699d53d7d4ffa2e853e46a20cd12d619a09207e35300bd0 ;;
1.27*) kind_image=kindest/node:v1.27.13@sha256:17439fa5b32290e3ead39ead1250dca1d822d94a10d26f1981756cd51b24b9d8 ;;
1.26*) kind_image=kindest/node:v1.26.15@sha256:84333e26cae1d70361bb7339efb568df1871419f2019c80f9a12b7e2d485fe19 ;;
1.25*) kind_image=kindest/node:v1.25.16@sha256:5da57dfc290ac3599e775e63b8b6c49c0c85d3fec771cd7d55b45fae14b38d3b ;;
esac
# TODO: check that the cluster does not already exist
kind create cluster --name "${CLUSTER_NAME}" --image "${kind_image}"
KIND="${CLUSTER_NAME}" OAUTH_CLIENT_ID="${OAUTH_CLIENT_ID}" OAUTH_CLIENT_SECRET="${OAUTH_CLIENT_SECRET}" ./scripts/kubetests/setup.sh
# TODO: now run the tests
# go test ./cmd/k8s-operator/e2e/...
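The two TODOs in this script could plausibly be addressed along the lines below; this is a sketch, assuming `kind get clusters` prints one cluster name per line and reusing the repo's ./tool/go wrapper for the test invocation.

# Sketch only: before `kind create cluster`, bail out if a cluster with this
# name already exists.
if kind get clusters | grep -qx "${CLUSTER_NAME}"; then
  echo "kind cluster ${CLUSTER_NAME} already exists" >&2
  exit 1
fi

# Sketch only: once setup.sh has finished, run the e2e suite.
./tool/go test ./cmd/k8s-operator/e2e/...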