Signed-off-by: Irbe Krumina <irbe@tailscale.com>
Irbe Krumina 2024-07-16 20:02:57 +03:00
parent 8882c6b730
commit a3b1ef660a
9 changed files with 702 additions and 12 deletions

Makefile

@@ -110,6 +110,16 @@ publishdevnameserver: ## Build and publish k8s-nameserver image to location spec
@test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=k8s-nameserver ./build_docker.sh
publishdeveksnlb: ## Build and publish eks-nlb image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-nameserver" || (echo "REPO=... must not be tailscale/k8s-nameserver" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-nameserver" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-nameserver" && exit 1)
@test "${REPO}" != "tailscale/eks-nlb" || (echo "REPO=... must not be tailscale/eks-nlb" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/eks-nlb" || (echo "REPO=... must not be ghcr.io/tailscale/eks-nlb" && exit 1)
TAGS="${TAGS}" REPOS=${REPO} PLATFORM=${PLATFORM} PUSH=true TARGET=eks-nlb ./build_docker.sh
.PHONY: sshintegrationtest
sshintegrationtest: ## Run the SSH integration tests in various Docker containers
@GOOS=linux GOARCH=amd64 ./tool/go test -tags integrationtest -c ./ssh/tailssh -o ssh/tailssh/testcontainers/tailssh.test && \

build_docker.sh

@@ -87,6 +87,22 @@ case "$TARGET" in
--target="${PLATFORM}" \
/usr/local/bin/k8s-nameserver
;;
eks-nlb)
DEFAULT_REPOS="tailscale/eks-nlb"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="tailscale.com/cmd/eks-nlb:/usr/local/bin/eks-nlb" \
--ldflags=" \
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--repos="${REPOS}" \
--push="${PUSH}" \
--target="${PLATFORM}" \
/usr/local/bin/eks-nlb
;;
*)
echo "unknown target: $TARGET"
exit 1

cmd/eks-nlb/README.md Normal file

@@ -0,0 +1,63 @@
eks-nlb can be used to set up routing from an AWS NLB to the wireguard port of Tailscale running in a Pod.
### Pods must:
- have the tailscale.com/eksnlb-configmap annotation set to a ConfigMap that contains the NLB ARN and the ID of the EKS cluster VPC
(see the structure in example.yaml)
- have the TS_DEBUG_PRETENDPOINT env var set directly on the 'tailscale' container or provided via a ConfigMap
- have a container named 'tailscale' that runs tailscale
- have the wireguard port set to 41641
- have metrics exposed on port 9001 (temporary health check solution)
## Deploy
Deploy the controller (the manifests in ./deploy.yaml use the tailscale namespace):
1. Create a Secret with AWS creds
```sh
kubectl create secret generic aws-creds -n tailscale --from-literal aws_access_key_id=<AWS_ACCESS_KEY_ID> \
  --from-literal aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
```
2. (Optional) Modify image in ./deploy.yaml
3. Deploy:
```
$ kubectl apply -f ./deploy.yaml
```
## Usage example
See an example manifest in ./example.yaml
To use:
- deploy the controller
- create an NLB, set up security groups, etc.
- create a Secret with tailscale auth key
```
kubectl create secret generic ts-creds -n tailscale --from-literal=authkey=<ts-auth-key>
```
- populate the 'eks-config' ConfigMap with the NLB ARN and the EKS cluster's VPC ID
- populate the 'pretendpoint' ConfigMap with <load balancer IP>:<port> pairs (see the sketch below)
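For example, a minimal sketch of creating the two ConfigMaps, using the namespace and names from example.yaml and placeholder values:
```sh
# Illustrative only: replace the placeholder ARN, VPC ID, IPs and port with
# the values for your NLB and EKS cluster.
kubectl create configmap eks-config -n tailscale \
  --from-literal=lb_arn=<nlb-arn> \
  --from-literal=vpc_id=<vpc-id>

# Comma-separated <load balancer IP>:<port> pairs; all pairs must use the same port.
kubectl create configmap pretendpoint -n tailscale \
  --from-literal=pretendpoint='<lb-ip-1>:<port>,<lb-ip-2>:<port>'
```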
eks-nlb will then ensure that the single replica is exposed on the load balancer whose ARN is read from the ConfigMap referenced by the tailscale.com/eksnlb-configmap annotation, on the port specified via the TS_DEBUG_PRETENDPOINT env var read from the 'pretendpoint' ConfigMap.
TODO: this flow is inconvenient. We should be able to make eks-nlb set TS_DEBUG_PRETENDPOINT dynamically once tailscaled can reload its config dynamically.
The controller will:
- create a target group that targets the Pod IP, routing traffic to port 41641 and using port 9001 as the health check port
- expose this target on the NLB via the port parsed from TS_DEBUG_PRETENDPOINT
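To spot-check what the controller created, one option is the AWS CLI; a minimal sketch (the target group name is derived from the Pod as `<pod-name>-<namespace>`, and the ARN is a placeholder):
```sh
# With the example.yaml StatefulSet, the single Pod is tailscale-0 in the
# tailscale namespace, so the target group is named tailscale-0-tailscale.
aws elbv2 describe-target-groups --names tailscale-0-tailscale

# The NLB should have a UDP listener forwarding to that target group.
aws elbv2 describe-listeners --load-balancer-arn <nlb-arn>
```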
## Dev
Build and push images with `REPO="<registry>/eksnlb" TAGS=<tags> make publishdeveksnlb`

cmd/eks-nlb/deploy.yaml Normal file

@@ -0,0 +1,63 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: eks-nlb
  namespace: tailscale
spec:
  replicas: 1
  selector:
    matchLabels:
      app: eks-nlb
  template:
    metadata:
      labels:
        app: eks-nlb
    spec:
      serviceAccountName: eks-nlb
      containers:
        - name: eks-nlb
          image: gcr.io/csi-test-290908/eksnlb:v0.0.15 # this image is publicly available
          env:
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  key: aws_access_key_id
                  name: aws-creds
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  key: aws_secret_access_key
                  name: aws-creds
            - name: AWS_DEFAULT_REGION
              value: eu-central-1
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: eks-nlb
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "update", "patch", "create", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: eks-nlb
subjects:
  - kind: ServiceAccount
    name: eks-nlb
    namespace: tailscale
roleRef:
  kind: ClusterRole
  name: eks-nlb
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: tailscale
  name: eks-nlb

cmd/eks-nlb/example.yaml Normal file

@@ -0,0 +1,95 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: tailscale
  namespace: tailscale
spec:
  replicas: 1
  selector:
    matchLabels:
      app: tailscale
  template:
    metadata:
      labels:
        app: tailscale
      annotations:
        tailscale.com/eksnlb-configmap: eks-config
    spec:
      serviceAccountName: tailscale
      containers:
        - name: tailscale
          image: tailscale/tailscale:unstable
          env:
            - name: TS_AUTHKEY
              valueFrom:
                secretKeyRef:
                  name: ts-creds
                  key: authkey
            - name: TS_KUBE_SECRET
              value: tailscale-secret
            - name: TS_HOSTNAME
              value: eks-nlb-test
            - name: TS_USERSPACE
              value: "false"
            - name: TS_TAILSCALED_EXTRA_ARGS
              value: "--port=41641 --debug=0.0.0.0:9001"
            - name: TS_DEBUG_PRETENDPOINT
              valueFrom:
                configMapKeyRef:
                  name: pretendpoint
                  key: pretendpoint
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          resources:
            limits:
              memory: 64Mi
              cpu: 10m
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tailscale
  namespace: tailscale
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources: ["secrets"]
    verbs: ["get", "update", "patch", "create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tailscale
  namespace: tailscale
subjects:
  - kind: ServiceAccount
    name: tailscale
    namespace: tailscale
roleRef:
  kind: Role
  name: tailscale
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tailscale
  namespace: tailscale
---
apiVersion: v1
data:
  vpc_id:
  lb_arn:
kind: ConfigMap
metadata:
  name: eks-config
  namespace: tailscale
---
apiVersion: v1
data:
  pretendpoint: <lb-ip-1>:<port>,<lb-ip-2>:<port>
kind: ConfigMap
metadata:
  name: pretendpoint
  namespace: tailscale

cmd/eks-nlb/main.go Normal file

@@ -0,0 +1,68 @@
package main
import (
"github.com/go-logr/zapr"
"go.uber.org/zap/zapcore"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
kzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"tailscale.com/version"
)
// TODO: add an option to configure namespaces to watch
const tsNamespace = "tailscale"
func main() {
zlog := kzap.NewRaw(kzap.Level(zapcore.DebugLevel)).Sugar()
logf.SetLogger(zapr.NewLogger(zlog.Desugar()))
startLog := zlog.Named("startup")
restConfig := config.GetConfigOrDie()
nsFilter := cache.ByObject{
Field: client.InNamespace(tsNamespace).AsSelector(),
}
mgrOpts := manager.Options{
// TODO (irbekrm): stricter filtering what we watch/cache/call
// reconcilers on. c/r by default starts a watch on any
// resources that we GET via the controller manager's client.
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&corev1.Pod{}: nsFilter,
&corev1.ConfigMap{}: nsFilter,
},
},
}
mgr, err := manager.New(restConfig, mgrOpts)
if err != nil {
startLog.Fatalf("could not create manager: %v", err)
}
// TODO: cache metadata only, otherwise this will cache all Pods in the
// cluster -> high memory consumption.
err = builder.
ControllerManagedBy(mgr).
Named("pods-reconciler").
For(&corev1.Pod{}).
Complete(&podReconciler{
logger: zlog.Named("pods-reconciler"),
Client: mgr.GetClient(),
})
if err != nil {
startLog.Fatalf("could not create pods reconciler: %v", err)
}
zlog.Infof("Startup complete, operator running, version: %s", version.Long())
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
startLog.Fatalf("could not start manager: %v", err)
}
}

cmd/eks-nlb/reconciler.go Normal file

@@ -0,0 +1,372 @@
package main
import (
"context"
"errors"
"fmt"
"math/rand"
"net"
"strconv"
"strings"
"github.com/aws/aws-sdk-go-v2/config"
elb "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2"
elbtypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/types/ptr"
)
const (
eksNLBConfigAnnotation = "tailscale.com/eksnlb-configmap"
pretendpointEnvVar = "TS_DEBUG_PRETENDPOINT"
wireguardPort int32 = 41641
metricsPort string = "9001"
)
type podReconciler struct {
client.Client
logger *zap.SugaredLogger
}
type podConfig struct {
portFromEnv int32
lbAddrsFromEnv []string
lbARN string
vpcID string
podLabels map[string]string
backendIP string // Pod IP
}
func (pr *podReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
logger := pr.logger.With("pod-ns", req.Namespace, "pod-name", req.Name)
logger.Debugf("starting reconcile")
defer logger.Debugf("reconcile finished")
pod := new(corev1.Pod)
err = pr.Get(ctx, req.NamespacedName, pod)
if apierrors.IsNotFound(err) {
logger.Debugf("Pod not found, assuming it was deleted")
return reconcile.Result{}, nil
} else if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get Pod: %w", err)
}
if !pod.DeletionTimestamp.IsZero() {
logger.Debugf("Pod is being deleted; currently doing nothing")
// TODO: clean up load balancer resources
return reconcile.Result{}, nil
}
if pod.Annotations[eksNLBConfigAnnotation] == "" {
logger.Debugf("Pod does not have %s annotation, do nothing", eksNLBConfigAnnotation)
return res, nil
// TODO: clean up if removed
}
// TODO: validate Pod config
// TODO: add a finalizer
// Parse Pod config
pc, err := pr.parseClusterConfig(ctx, pod)
if err != nil {
return res, fmt.Errorf("error parsing Pod config: %w", err)
}
if pc == nil {
// parseClusterConfig returns a nil config when TS_DEBUG_PRETENDPOINT is not yet usable.
logger.Debugf("Pod config is not yet complete, waiting...")
return res, nil
}
if pc.backendIP == "" {
logger.Info("[unexpected] Pod does not have an IP address allocated, waiting...")
return res, nil
}
cfg, err := config.LoadDefaultConfig(ctx)
if err != nil {
return res, fmt.Errorf("unable to load SDK config, %v", err)
}
cl := elb.NewFromConfig(cfg)
resourceName := fmt.Sprintf("%s-%s", pod.Name, pod.Namespace)
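// Ensure a target group that targets the Pod IP directly: UDP traffic is
// routed to the wireguard port, with TCP health checks against the metrics port.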
tgci := elb.CreateTargetGroupInput{
VpcId: &pc.vpcID,
Name: &resourceName,
HealthCheckEnabled: ptr.To(true), // TODO: internal pointer
HealthCheckPort: ptr.To(metricsPort),
HealthCheckProtocol: "TCP",
// TODO: other health check params
// IpAddressType: "ipv4", // TODO: determine from Pod IP
Port: ptr.To(wireguardPort),
Protocol: "UDP",
TargetType: elbtypes.TargetTypeEnumIp,
}
// CreateTargetGroup is idempotent
tgco, err := cl.CreateTargetGroup(ctx, &tgci)
if err != nil {
return res, fmt.Errorf("error creating target group %q", err)
}
if len(tgco.TargetGroups) == 0 {
logger.Debugf("No target groups found after creation, waiting...")
return res, nil
}
// Loop over and look up matching IP addresses
var tg *elbtypes.TargetGroup
for _, maybeTG := range tgco.TargetGroups {
if strings.EqualFold(*maybeTG.TargetGroupName, resourceName) {
logger.Debugf("found target group %s", resourceName)
tg = &maybeTG
// TODO: verify ports etc
}
}
if tg == nil {
logger.Infof("[unexpected] target group not found")
return res, nil
}
if tg.TargetGroupArn == nil {
logger.Infof("[unexpected] target group %+#v has no ARN", tg)
return res, nil
}
logger.Debugf("found target group %v", tg.TargetGroupArn)
// List targets
hi := elb.DescribeTargetHealthInput{TargetGroupArn: tg.TargetGroupArn}
ho, err := cl.DescribeTargetHealth(ctx, &hi)
if err != nil {
return res, fmt.Errorf("error describing target health: %w", err)
}
var targetExists bool
for _, health := range ho.TargetHealthDescriptions {
if health.Target.Id == &pc.backendIP {
logger.Debugf("Target found %#+v", health.Target)
targetExists = true
} else {
// TODO: Deregister the target
logger.Debugf("Found target that should be deregistered: %#+v", health.Target)
}
}
if !targetExists {
logger.Debugf("target for %v does not exist, creating...", pc.backendIP)
target := elb.RegisterTargetsInput{TargetGroupArn: tg.TargetGroupArn, Targets: []elbtypes.TargetDescription{
{Id: ptr.To(pc.backendIP), Port: ptr.To(wireguardPort)},
}}
_, err := cl.RegisterTargets(ctx, &target)
if err != nil {
return res, fmt.Errorf("error registering target: %w", err)
}
}
li := elb.DescribeListenersInput{LoadBalancerArn: &pc.lbARN}
lo, err := cl.DescribeListeners(ctx, &li)
if err != nil {
return res, fmt.Errorf("error listing listeners: %w", err)
}
var lis *elbtypes.Listener
port := pc.portFromEnv
if port != 0 {
for _, l := range lo.Listeners {
if l.Port == &pc.portFromEnv {
logger.Debugf("found existing listener on port %q", pc.portFromEnv)
lis = &l
}
}
} else {
// figure out a free port
searchFreePort := true
for searchFreePort {
suggestPort := int32(rand.Intn(65535) + 1) // 1 - 65535
found := false
for _, l := range lo.Listeners {
if l.Port == &suggestPort {
found = true
break
}
}
if !found {
port = suggestPort
searchFreePort = false
}
}
if port == 0 {
return res, errors.New("unable to find a free port to expose on the listener")
}
}
for _, maybeLB := range lo.Listeners {
if maybeLB.Port != nil && *maybeLB.Port == port {
logger.Debugf("Found listener for port %v", port)
lis = &maybeLB
break
}
}
if lis == nil {
logger.Infof("listener for port %v not found, creating", port)
lci := elb.CreateListenerInput{
LoadBalancerArn: &pc.lbARN,
Port: ptr.To(port),
Protocol: "UDP",
DefaultActions: []elbtypes.Action{
{TargetGroupArn: tg.TargetGroupArn, Type: elbtypes.ActionTypeEnumForward},
},
}
lco, err := cl.CreateListener(ctx, &lci)
if err != nil {
return res, fmt.Errorf("error creating listener: %w", err)
}
logger.Infof("created listener with arn: %v", lco.Listeners[0].ListenerArn)
}
dli := elb.DescribeLoadBalancersInput{LoadBalancerArns: []string{pc.lbARN}}
dlo, err := cl.DescribeLoadBalancers(ctx, &dli)
if len(dlo.LoadBalancers) != 1 {
return res, fmt.Errorf("expected exactly 1 NLB with ARN %s, got %d", pc.lbARN, len(dlo.LoadBalancers))
}
lb := dlo.LoadBalancers[0]
addrs := make([]string, 0)
for _, z := range lb.AvailabilityZones {
for _, a := range z.LoadBalancerAddresses {
addrs = append(addrs, *a.IpAddress) // IPv6?
}
}
if err := pr.ensurePretendPointUpToDate(ctx, pod, port, addrs); err != nil {
return res, fmt.Errorf("error ensuring TS_DEBUG_PRETENDPOINT value is up to date: %w", err)
}
return reconcile.Result{}, nil
}
func (pr *podReconciler) ensurePretendPointUpToDate(ctx context.Context, p *corev1.Pod, port int32, addrs []string) error {
var cont *corev1.Container
for _, c := range p.Spec.Containers {
if c.Name == "tailscale" {
cont = &c
break
}
}
if cont == nil {
return errors.New("pod does not have a 'tailscale' container")
}
// Calculate the desired TS_DEBUG_PRETENDPOINT value: a comma-separated list
// of <load balancer IP>:<port> pairs.
addrPorts := make([]string, 0, len(addrs))
for _, a := range addrs {
addrPorts = append(addrPorts, net.JoinHostPort(a, strconv.Itoa(int(port))))
}
pretendpoint := strings.Join(addrPorts, ",")
for _, envVar := range cont.Env {
if envVar.Name == pretendpointEnvVar {
if envVar.Value != "" {
// TODO: log an error if this is not up to date
pr.logger.Infof("env var is set directly, doing nothing")
return nil
} else if cmConfig := envVar.ValueFrom.ConfigMapKeyRef; cmConfig != nil {
cm := &corev1.ConfigMap{}
n := types.NamespacedName{Name: cmConfig.Name, Namespace: p.Namespace}
err := pr.Get(ctx, n, cm)
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("error retrieving ConfigMap: %w", err)
}
if apierrors.IsNotFound(err) {
pr.logger.Infof("Creating ConfigMap")
cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: cmConfig.Name},
Data: map[string]string{cmConfig.Key: pretendpoint}}
return pr.Create(ctx, cm)
}
if cm.Data[cmConfig.Key] != pretendpoint {
pr.logger.Infof("Updating ConfigMap with wireguard endpoints value: %v", pretendpoint)
cm.Data[cmConfig.Key] = pretendpoint
return pr.Update(ctx, cm)
}
}
}
}
return nil
}
func (pr *podReconciler) parseClusterConfig(ctx context.Context, p *corev1.Pod) (*podConfig, error) {
var cont *corev1.Container
for _, c := range p.Spec.Containers {
if c.Name == "tailscale" {
cont = &c
break
}
}
if cont == nil {
return nil, errors.New("pod does not have a 'tailscale' container")
}
var pretendpoint string
for _, envVar := range cont.Env {
if envVar.Name == pretendpointEnvVar {
if envVar.Value != "" {
pretendpoint = envVar.Value
} else if cmConfig := envVar.ValueFrom.ConfigMapKeyRef; cmConfig != nil {
// Get the configmap
// Read the value if exists
cm := &corev1.ConfigMap{}
n := types.NamespacedName{Name: cmConfig.Name, Namespace: p.Namespace}
err := pr.Get(ctx, n, cm)
if apierrors.IsNotFound(err) {
pr.logger.Info("ConfigMap %s does not exist, it will be created")
} else if err != nil {
return nil, fmt.Errorf("error retrieving ConfigMap: %w", err)
} else if cm.Data[cmConfig.Key] != "" {
pretendpoint = cm.Data[cmConfig.Key]
pr.logger.Infof("read wireguard endoints for ConfigMap: %v", pretendpoint)
}
}
break
}
}
if pretendpoint == "" {
return nil, nil
}
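// TS_DEBUG_PRETENDPOINT holds a comma-separated list of <load balancer IP>:<port>
// pairs; all pairs are expected to use the same port.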
addrs := strings.Split(pretendpoint, ",")
var maybePort string
var lbAddrs []string
for _, a := range addrs {
h, port, err := net.SplitHostPort(a)
if err != nil {
return nil, fmt.Errorf("error splitting host port: %v", err)
}
// If the ports are not all the same, something is off; bail out and retry on the next reconcile.
if maybePort != "" && maybePort != port {
return nil, nil
}
maybePort = port
lbAddrs = append(lbAddrs, h)
}
port, err := strconv.ParseInt(maybePort, 10, 32)
if err != nil {
return nil, fmt.Errorf("error parsing port %q as int: %w", maybePort, err)
}
cm := &corev1.ConfigMap{}
if err := pr.Get(ctx, types.NamespacedName{Namespace: p.Namespace, Name: p.Annotations[eksNLBConfigAnnotation]}, cm); err != nil {
return nil, fmt.Errorf("ConfigMap %s not found", eksNLBConfigAnnotation)
}
vpcID := cm.Data["vpc_id"]
if vpcID == "" {
return nil, fmt.Errorf("vpc_id field not set for %s ConfigMap", eksNLBConfigAnnotation)
}
lbARN := cm.Data["lb_arn"]
if lbARN == "" {
return nil, fmt.Errorf("lb_arn not set for %s ConfigMap", eksNLBConfigAnnotation)
}
return &podConfig{
portFromEnv: int32(port),
lbAddrsFromEnv: lbAddrs,
vpcID: vpcID,
lbARN: lbARN,
podLabels: p.Labels,
backendIP: p.Status.PodIP,
}, nil
}

go.mod

@@ -9,9 +9,10 @@ require (
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa
github.com/andybalholm/brotli v1.1.0
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/aws/aws-sdk-go-v2 v1.24.1
github.com/aws/aws-sdk-go-v2 v1.30.3
github.com/aws/aws-sdk-go-v2/config v1.26.5
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.33.0
github.com/aws/aws-sdk-go-v2/service/ssm v1.44.7
github.com/bramvdbogaerde/go-scp v1.4.0
@@ -162,8 +163,8 @@ require (
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
@@ -173,7 +174,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/aws/smithy-go v1.20.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bkielbasa/cyclop v1.2.0 // indirect
github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb // indirect

go.sum

@@ -113,8 +113,8 @@ github.com/ashanbrown/forbidigo v1.5.1/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1
github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/aws/aws-sdk-go-v2 v1.18.0/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY=
github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
github.com/aws/aws-sdk-go-v2/config v1.18.22/go.mod h1:mN7Li1wxaPxSSy4Xkr6stFuinJGf3VZW3ZSNvO0q6sI=
@@ -129,16 +129,18 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUt
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64 h1:9QJQs36z61YB8nxGwRDfWXEDYbU6H7jdI6zFiAX1vag=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.64/go.mod h1:4Q7R9MFpXRdjO3YnAfUTdnuENs32WzBkASt6VxSYDYQ=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.33/go.mod h1:7i0PF1ME/2eUPFcjkVIwq+DOygHEoK92t5cDqNgYbIw=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.27/go.mod h1:UrHnn3QV/d0pBZ6QBAEQcqFLf8FAzLmoUfPVIueOvoM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.34/go.mod h1:Etz2dj6UHYuw+Xw830KfzCfWGMzqvUTCjUj5b76GVDc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25 h1:AzwRi5OKKwo4QNqPf7TjeO+tK8AyOK3GVSwmRPo7/Cs=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.25/go.mod h1:SUbB4wcbSEyCvqBxv/O/IBf93RbEze7U7OnoTlpPB+g=
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 h1:yiBmRRlVwehTN2TF0wbUkM7BluYFOLZU/U2SeQHE+q8=
github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3/go.mod h1:L5bVuO4PeXuDuMYZfL3IW69E6mz6PDCYpp6IKDlcLMA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
@@ -163,8 +165,8 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.18.10/go.mod h1:BgQOMsg8av8jset59jel
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE=
github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=