fix: remove operator (#3195)

Author: Livio Amstutz
Date: 2022-02-11 15:00:14 +01:00
Committed by: GitHub
Parent: c086c4fd55
Commit: 2d208dd8ba
224 changed files with 21 additions and 23969 deletions

View File

@@ -12,8 +12,6 @@ on:
env:
REGISTRY: ghcr.io
OPERATOR_IMAGE_NAME: ${{ github.repository }}-operator
CRDB_IMAGE_NAME: ${{ github.repository }}-crbackup
ARTIFACTS_FOLDER: './artifacts'
jobs:
@@ -158,140 +156,10 @@ jobs:
push: true
cache-from: type=gha,scope=${{ github.workflow }}
cache-to: type=gha,scope=${{ github.workflow }},mode=max
zitadel-operator:
name: Build ZITADEL Operator ${{ matrix.goos }}-${{ matrix.goarch }}
needs: refs
runs-on: ubuntu-18.04
strategy:
matrix:
goos: [ 'linux', 'darwin', 'windows' ]
goarch: [ 'amd64' ]
steps:
- name: Source checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- uses: docker/build-push-action@v2
with:
context: .
file: ./build/operator/Dockerfile
platforms: linux/amd64
cache-from: type=gha,scope=${{ matrix.goos }}-${{ matrix.goarch }}
cache-to: type=gha,scope=${{ matrix.goos }}-${{ matrix.goarch }},mode=max
outputs: type=local,dest=/tmp/operator
build-args: |
OS=${{ matrix.goos }}
ARCH=${{ matrix.goarch }}
VERSION=${{ needs.refs.outputs.version }}
GITHUBOAUTHCLIENTID=${{ secrets.GITHUBOAUTHCLIENTID }}
GITHUBOAUTHCLIENTSECRET=${{ secrets.GITHUBOAUTHCLIENTSECRET }}
- shell: bash
run: |
mv /tmp/operator/zitadelctl /tmp/operator/zitadelctl-${{ matrix.goos }}-${{ matrix.goarch }}
- uses: actions/upload-artifact@v2
with:
name: zitadelctl-${{ matrix.goos }}-${{ matrix.goarch }}
path: /tmp/operator/zitadelctl-${{ matrix.goos }}-${{ matrix.goarch }}
zitadel-operator-codecov:
name: Upload ZITADEL Operator Codecov ${{ matrix.goos }}-${{ matrix.goarch }}
needs: [refs, zitadel-operator]
runs-on: ubuntu-18.04
strategy:
matrix:
goos: [ 'linux' ]
goarch: [ 'amd64' ]
steps:
- name: Source checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- uses: docker/build-push-action@v2
with:
context: .
file: ./build/operator/Dockerfile
platforms: linux/amd64
cache-from: type=gha,scope=${{ matrix.goos }}-${{ matrix.goarch }}
target: go-codecov
outputs: type=local,dest=/tmp/operator
- uses: codecov/codecov-action@v1
with:
files: /tmp/operator/profile.cov
name: codecov-go
zitadel-operator-image:
name: Package ZITADEL Operator Image ${{ matrix.goos }}-${{ matrix.goarch }}
needs: [refs, zitadel-operator]
runs-on: ubuntu-18.04
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
strategy:
matrix:
goos: [ 'linux' ]
goarch: [ 'amd64' ]
steps:
- name: Source checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ${{ env.REGISTRY }}
- uses: docker/build-push-action@v2
with:
context: .
file: ./build/operator/Dockerfile
platforms: linux/amd64
tags: ${{ env.REGISTRY }}/${{ env.OPERATOR_IMAGE_NAME }}:${{ needs.refs.outputs.sha_short }},${{ env.REGISTRY }}/${{ env.OPERATOR_IMAGE_NAME }}:${{ needs.refs.outputs.short_ref }}
push: true
cache-from: type=gha,scope=${{ matrix.goos }}-${{ matrix.goarch }}
build-args: |
OS=${{ matrix.goos }}
ARCH=${{ matrix.goarch }}
VERSION=${{ needs.refs.outputs.version }}
crdb-image:
name: Package ZITADEL CockroachDB Image
needs: refs
runs-on: ubuntu-18.04
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
steps:
- name: Source checkout
uses: actions/checkout@v2
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
registry: ${{ env.REGISTRY }}
- uses: docker/build-push-action@v2
name: buildandpush
with:
context: .
file: ./build/cr-backup/Dockerfile
platforms: linux/amd64
tags: ${{ env.REGISTRY }}/${{ env.CRDB_IMAGE_NAME }}:${{ needs.refs.outputs.sha_short }},${{ env.REGISTRY }}/${{ env.CRDB_IMAGE_NAME }}:${{ needs.refs.outputs.short_ref }}
push: true
cache-from: type=gha,scope=${{ github.workflow }}
cache-to: type=gha,scope=${{ github.workflow }}
release:
name: Semantic Release Images and Artifacts
runs-on: ubuntu-18.04
needs: [ refs, zitadel-image, zitadel-operator-image, crdb-image ]
needs: [ refs, zitadel-image ]
if: ${{ github.event_name == 'workflow_dispatch' || github.ref == 'refs/heads/main' }}
env:
DOCKER_USERNAME: ${{ github.repository_owner }}
@@ -307,10 +175,6 @@ jobs:
registry: ${{ env.REGISTRY }}
- name: Docker Pull ZITADEL Image
run: docker pull $REGISTRY/$GITHUB_REPOSITORY:${{ needs.refs.outputs.sha_short }}
- name: Docker Pull ZITADEL Operator Image
run: docker pull $REGISTRY/$OPERATOR_IMAGE_NAME:${{ needs.refs.outputs.sha_short }}
- name: Docker Pull CockroachDB Image
run: docker pull $REGISTRY/$CRDB_IMAGE_NAME:${{ needs.refs.outputs.sha_short }}
- name: Download zitadelctl Artifacts
uses: actions/download-artifact@v2
with:
@@ -338,39 +202,27 @@ jobs:
- name: Docker Tag Version
run: |
docker tag $REGISTRY/$GITHUB_REPOSITORY:${{ needs.refs.outputs.sha_short }} $REGISTRY/$GITHUB_REPOSITORY:${{ steps.semantic.outputs.new_release_version }}
docker tag $REGISTRY/$OPERATOR_IMAGE_NAME:${{ needs.refs.outputs.sha_short }} $REGISTRY/$OPERATOR_IMAGE_NAME:${{ steps.semantic.outputs.new_release_version }}
docker tag $REGISTRY/$CRDB_IMAGE_NAME:${{ needs.refs.outputs.sha_short }} $REGISTRY/$CRDB_IMAGE_NAME:${{ steps.semantic.outputs.new_release_version }}
if: steps.semantic.outputs.new_release_published == 'true'
- name: Docker Tag Latest
run: |
docker tag $REGISTRY/$GITHUB_REPOSITORY:${{ needs.refs.outputs.sha_short }} $REGISTRY/$GITHUB_REPOSITORY:latest
docker tag $REGISTRY/$OPERATOR_IMAGE_NAME:${{ needs.refs.outputs.sha_short }} $REGISTRY/$OPERATOR_IMAGE_NAME:latest
docker tag $REGISTRY/$CRDB_IMAGE_NAME:${{ needs.refs.outputs.sha_short }} $REGISTRY/$CRDB_IMAGE_NAME:latest
if: steps.semantic.outputs.new_release_published == 'true'
- name: Docker Push Version
run: |
docker push $REGISTRY/$GITHUB_REPOSITORY:${{ steps.semantic.outputs.new_release_version }}
docker push $REGISTRY/$OPERATOR_IMAGE_NAME:${{ steps.semantic.outputs.new_release_version }}
docker push $REGISTRY/$CRDB_IMAGE_NAME:${{ steps.semantic.outputs.new_release_version }}
if: steps.semantic.outputs.new_release_published == 'true'
- name: Docker Push Latest
run: |
docker push $REGISTRY/$GITHUB_REPOSITORY:latest
docker push $REGISTRY/$OPERATOR_IMAGE_NAME:latest
docker push $REGISTRY/$CRDB_IMAGE_NAME:latest
if: steps.semantic.outputs.new_release_published == 'true'
- name: Docker Tag Version
run: |
docker tag $REGISTRY/$GITHUB_REPOSITORY:${{ needs.refs.outputs.sha_short }} $REGISTRY/$GITHUB_REPOSITORY:${{ needs.refs.outputs.short_ref }}
docker tag $REGISTRY/$OPERATOR_IMAGE_NAME:${{ needs.refs.outputs.sha_short }} $REGISTRY/$OPERATOR_IMAGE_NAME:${{ needs.refs.outputs.short_ref }}
docker tag $REGISTRY/$CRDB_IMAGE_NAME:${{ needs.refs.outputs.sha_short }} $REGISTRY/$CRDB_IMAGE_NAME:${{ needs.refs.outputs.short_ref }}
if: steps.semantic.outputs.new_release_published != 'true' && needs.refs.outputs.short_ref != 'main' && needs.refs.outputs.short_ref != ''
- name: Docker Push Version
run: |
docker push $REGISTRY/$GITHUB_REPOSITORY:${{ needs.refs.outputs.short_ref }}
docker push $REGISTRY/$OPERATOR_IMAGE_NAME:${{ needs.refs.outputs.short_ref }}
docker push $REGISTRY/$CRDB_IMAGE_NAME:${{ needs.refs.outputs.short_ref }}
if: steps.semantic.outputs.new_release_published != 'true' && needs.refs.outputs.short_ref != 'main' && needs.refs.outputs.short_ref != ''
- name: Development Release
id: create_release
@@ -398,4 +250,4 @@ jobs:
SENTRY_PROJECT: ${{ secrets.SENTRY_PROJECT }}
with:
version: zitadel-${{ needs.refs.outputs.version }}
projects: "console database-operator zitadel zitadel-operator zitadelctl"
projects: "console zitadel zitadelctl"

View File

@@ -1,22 +0,0 @@
FROM cockroachdb/cockroach:v20.2.3
RUN microdnf install curl wget tar gzip
RUN wget https://storage.googleapis.com/oauth2l/latest/linux_amd64.tgz
RUN tar zxvf linux_amd64.tgz
RUN mv linux_amd64/oauth2l /usr/local/bin/oauth2l && rm -rf linux_amd64
COPY build/cr-backup/scripts/backup-cockroach.sh /scripts/backup.sh
RUN chmod +x /scripts/backup.sh
COPY build/cr-backup/scripts/restore-cockroach.sh /scripts/restore.sh
RUN chmod +x /scripts/restore.sh
COPY build/cr-backup/scripts/clean-db-cockroach.sh /scripts/clean-db.sh
RUN chmod +x /scripts/clean-db.sh
COPY build/cr-backup/scripts/clean-migration-cockroach.sh /scripts/clean-migration.sh
RUN chmod +x /scripts/clean-migration.sh
COPY build/cr-backup/scripts/clean-user-cockroach.sh /scripts/clean-user.sh
RUN chmod +x /scripts/clean-user.sh
ENTRYPOINT [ "/cockroach" ]

View File

@@ -1,17 +0,0 @@
env=$1
bucket=$2
db=$3
folder=$4
safile=$5
certs=$6
name=$7
filenamelocal=zitadel-${db}.sql
filenamebucket=zitadel-${db}-${name}.sql
/cockroach/cockroach.sh dump --dump-mode=data --certs-dir=${certs} --host=cockroachdb-public:26257 ${db} > ${folder}/${filenamelocal}
curl -X POST \
-H "$(oauth2l header --json ${safile} cloud-platform)" \
-H "Content-Type: application/json" \
--data-binary @${folder}/${filenamelocal} \
"https://storage.googleapis.com/upload/storage/v1/b/${bucket}/o?uploadType=media&name=${env}/${name}/${filenamebucket}"

View File

@@ -1,4 +0,0 @@
certs=$1
db=$2
/cockroach/cockroach.sh sql --certs-dir=${certs} --host=cockroachdb-public:26257 -e "DROP DATABASE IF EXISTS ${db} CASCADE;"

View File

@@ -1,3 +0,0 @@
certs=$1
/cockroach/cockroach.sh sql --certs-dir=${certs} --host=cockroachdb-public:26257 -e "TRUNCATE defaultdb.flyway_schema_history;"

View File

@@ -1,4 +0,0 @@
certs=$1
db=$2
/cockroach/cockroach.sh sql --certs-dir=${certs} --host=cockroachdb-public:26257 -e "DROP USER IF EXISTS ${db};"

View File

@@ -1,33 +0,0 @@
bucket=$1
env=$2
name=$3
db=$4
safile=$5
certs=$6
urlencode() {
# urlencode <string>
old_lc_collate=$LC_COLLATE
LC_COLLATE=C
local length="${#1}"
for (( i = 0; i < length; i++ )); do
local c="${1:i:1}"
case $c in
[a-zA-Z0-9.~_-]) printf "$c" ;;
*) printf '%%%02X' "'$c" ;;
esac
done
LC_COLLATE=$old_lc_collate
}
filenamelocal=zitadel-${db}.sql
filenamebucket=zitadel-${db}-${name}.sql
curl -X GET \
-H "$(oauth2l header --json ${safile} cloud-platform)" \
-o "${filenamelocal}" \
"https://storage.googleapis.com/storage/v1/b/${bucket}/o/$(urlencode ${env}/${name}/${filenamebucket})?alt=media"
/cockroach/cockroach.sh sql --certs-dir=${certs} --host=cockroachdb-public:26257 --database=${db} < ${filenamelocal}
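
The restore script hand-rolls percent-encoding because the GCS JSON API expects the slash-separated object name as a single encoded path segment. In Go, url.PathEscape behaves the same way for typical object names (in particular it encodes "/" as %2F); a tiny sketch:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// env/name/file, as assembled by the script above (placeholder values).
	object := "prod/daily/zitadel-defaultdb.sql"
	fmt.Println(url.PathEscape(object))
	// Output: prod%2Fdaily%2Fzitadel-defaultdb.sql
}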

View File

@@ -1 +0,0 @@
**/statik/statik.go

View File

@@ -1,65 +0,0 @@
#######################
## By default we build the prod environment
ARG ENV=prod
#######################
## Go base build
## Speed up this step by mounting your local go mod pkg directory
#######################
FROM golang:1.16 as go-base
WORKDIR src/github.com/caos/zitadel/
COPY go.mod go.sum ./
RUN go mod download
## Go test
FROM go-base as go-test
COPY . .
#Migrations for cockroach-secure
RUN go install github.com/rakyll/statik
RUN ./build/operator/prebuild.sh ./migrations
RUN go test -race -v -coverprofile=profile.cov ./operator/...
## Go test
FROM scratch as go-codecov
COPY --from=go-test /go/src/github.com/caos/zitadel/profile.cov profile.cov
## Go prod build
FROM go-test as prod-go-build
ARG ARCH=amd64
ARG OS=linux
ARG VERSION=none
ARG GITHUBOAUTHCLIENTID=none
ARG GITHUBOAUTHCLIENTSECRET=none
RUN GOOS=${OS} GOARCH=${ARCH} ./build/operator/build.sh ${VERSION}
## Go dev build
FROM go-base as dev-go-build
RUN go get github.com/go-delve/delve/cmd/dlv
#######################
## Final Production Image
#######################
FROM alpine:latest as artifact
RUN adduser -D zitadel
ARG ARCH=amd64
ARG OS=linux
RUN apk add -U --no-cache ca-certificates
COPY --from=prod-go-build /go/src/github.com/caos/zitadel/zitadelctl /app/zitadelctl
RUN chmod a+x /app/zitadelctl
## Scratch Image
FROM scratch as final
COPY --from=artifact /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=artifact /etc/passwd /etc/passwd
COPY --from=artifact /app /
USER zitadel
HEALTHCHECK NONE
ENTRYPOINT ["/zitadelctl"]

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -e
CGO_ENABLED=0 go build \
-a \
-installsuffix cgo \
-ldflags "$(./build/operator/ldflags.sh "${1}")" \
-o zitadelctl \
./cmd/zitadelctl/main.go

View File

@@ -1,10 +0,0 @@
#!/bin/bash
set -e
VERSION=${1}
if [ "${VERSION}" == "" ]; then
VERSION="$(git rev-parse --abbrev-ref HEAD | sed -e 's/heads\///')"
fi
echo -n "-extldflags -static -X main.Version=${VERSION} -X main.githubClientID=${GITHUBOAUTHCLIENTID} -X main.githubClientSecret=${GITHUBOAUTHCLIENTSECRET}"

View File

@@ -1,5 +0,0 @@
#!/bin/bash
set -e
statik -src=$1

View File

@@ -148,13 +148,13 @@ FROM go-test as prod-go-build
ARG BUILDARCH
ARG VERSION=""
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${BUILDARCH} go build -a -installsuffix cgo -ldflags "-X main.version=${VERSION:-'dev'} -extldflags \"-static\"" -o zitadel-linux-${BUILDARCH} cmd/zitadel/main.go
RUN CGO_ENABLED=0 GOOS=linux GOARCH=${BUILDARCH} go build -a -installsuffix cgo -ldflags "-X main.version=${VERSION:-'dev'} -extldflags \"-static\"" -o zitadel-linux-${BUILDARCH} main.go
#######################
## Go dev build
#######################
FROM go-base as dev-go-build
ENTRYPOINT [ "go", "run", "cmd/zitadel/main.go" ]
ENTRYPOINT [ "go", "run", "main.go" ]
#######################
## Only Copy Assets

go.mod — 72 changed lines

View File

@@ -3,7 +3,6 @@ module github.com/caos/zitadel
go 1.17
require (
cloud.google.com/go/storage v1.18.2
github.com/BurntSushi/toml v0.4.1
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.0.0
@@ -12,14 +11,9 @@ require (
github.com/VictoriaMetrics/fastcache v1.8.0
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b
github.com/allegro/bigcache v1.2.1
github.com/aws/aws-sdk-go-v2 v1.11.2
github.com/aws/aws-sdk-go-v2/config v1.11.0
github.com/aws/aws-sdk-go-v2/credentials v1.6.4
github.com/aws/aws-sdk-go-v2/service/s3 v1.21.0
github.com/boombuler/barcode v1.0.1
github.com/caos/logging v0.2.1
github.com/caos/logging v0.3.0
github.com/caos/oidc v1.0.1
github.com/caos/orbos v1.5.14-0.20211102124704-34db02bceed2
github.com/cockroachdb/cockroach-go/v2 v2.2.4
github.com/dop251/goja v0.0.0-20211129110639-4739a1d10a51
github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d
@@ -48,6 +42,7 @@ require (
github.com/pquerna/otp v1.3.0
github.com/rakyll/statik v0.1.7
github.com/rs/cors v1.8.0
github.com/sirupsen/logrus v1.8.1
github.com/sony/sonyflake v1.0.0
github.com/spf13/cobra v1.3.0
github.com/spf13/viper v1.10.1
@@ -69,41 +64,20 @@ require (
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/text v0.3.7
golang.org/x/tools v0.1.8
google.golang.org/api v0.63.0
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
google.golang.org/grpc v1.43.0
google.golang.org/protobuf v1.27.1
gopkg.in/square/go-jose.v2 v2.6.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
gotest.tools v2.2.0+incompatible
k8s.io/api v0.22.4
k8s.io/apiextensions-apiserver v0.22.2
k8s.io/apimachinery v0.22.4
k8s.io/client-go v0.22.4
sigs.k8s.io/controller-runtime v0.10.3
sigs.k8s.io/yaml v1.3.0
)
require (
cloud.google.com/go v0.99.0 // indirect
cloud.google.com/go/trace v1.0.0 // indirect
github.com/AlecAivazis/survey/v2 v2.3.2 // indirect
github.com/AppsFlyer/go-sundheit v0.2.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.6.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.11.1 // indirect
github.com/aws/smithy-go v1.9.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.1 // indirect
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cloudflare/cfssl v0.0.0-20190726000631-633726f6bcb7 // indirect
@@ -112,69 +86,49 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
github.com/evanphx/json-patch v4.11.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.2 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/fxamacker/cbor/v2 v2.2.0 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.0.0 // indirect
github.com/go-git/go-git/v5 v5.2.0 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/gofrs/uuid v4.0.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.1.0 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/certificate-transparency-go v1.0.21 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/go-github/v31 v31.0.0 // indirect
github.com/google/go-querystring v1.0.0 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/gorilla/handlers v1.5.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kevinburke/go-types v0.0.0-20210723172823-2deba1f80ba7 // indirect
github.com/kevinburke/rest v0.0.0-20210506044642-5611499aa33c // indirect
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect
github.com/klauspost/compress v1.13.5 // indirect
github.com/klauspost/compress v1.14.2 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/minio/md5-simd v1.1.0 // indirect
github.com/minio/sha256-simd v0.1.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/muesli/clusters v0.0.0-20200529215643-2700303c1762 // indirect
github.com/muesli/kmeans v0.2.1 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pires/go-proxyproto v0.6.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
@@ -182,8 +136,6 @@ require (
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rs/xid v1.2.1 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/spf13/afero v1.8.1 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -192,7 +144,6 @@ require (
github.com/ttacon/builder v0.0.0-20170518171403-c099f663e1c2 // indirect
github.com/wcharczuk/go-chart/v2 v2.1.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.2.1 // indirect
github.com/xrash/smetrics v0.0.0-20200730060457-89a2a8a1fb0b // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.2.0 // indirect
@@ -200,23 +151,14 @@ require (
go.opentelemetry.io/proto/otlp v0.10.0 // indirect
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect
golang.org/x/mod v0.5.1 // indirect
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba // indirect
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect
golang.org/x/term v0.0.0-20210503060354-a79de5458b56 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.63.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/component-base v0.22.2 // indirect
k8s.io/klog/v2 v2.9.0 // indirect
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c // indirect
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
sigs.k8s.io/controller-tools v0.7.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
)
replace github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.7.4

go.sum — 429 changed lines

File diff suppressed because it is too large.

View File

@@ -1,15 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

View File

@@ -1,129 +0,0 @@
package operator
import (
"fmt"
"gopkg.in/yaml.v3"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
)
type AdaptFunc func(
monitor mntr.Monitor,
desired *tree.Tree,
current *tree.Tree,
) (
QueryFunc,
DestroyFunc,
ConfigureFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
)
type EnsureFunc func(k8sClient kubernetes.ClientInt) error
type DestroyFunc func(k8sClient kubernetes.ClientInt) error
type ConfigureFunc func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, gitops bool) error
type QueryFunc func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (EnsureFunc, error)
func Parse(gitClient *git.Client, file string) (*tree.Tree, error) {
if err := gitClient.Clone(); err != nil {
return nil, err
}
tree := &tree.Tree{}
if err := yaml.Unmarshal(gitClient.Read(file), tree); err != nil {
return nil, err
}
return tree, nil
}
func ResourceDestroyToZitadelDestroy(destroyFunc resources.DestroyFunc) DestroyFunc {
return func(k8sClient kubernetes.ClientInt) error {
return destroyFunc(k8sClient)
}
}
func ResourceQueryToZitadelQuery(queryFunc resources.QueryFunc) QueryFunc {
return func(k8sClient kubernetes.ClientInt, _ map[string]interface{}) (EnsureFunc, error) {
ensure, err := queryFunc(k8sClient)
ensureInternal := ResourceEnsureToZitadelEnsure(ensure)
return func(k8sClient kubernetes.ClientInt) error {
return ensureInternal(k8sClient)
}, err
}
}
func ResourceEnsureToZitadelEnsure(ensureFunc resources.EnsureFunc) EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
return ensureFunc(k8sClient)
}
}
func EnsureFuncToQueryFunc(ensure EnsureFunc) QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (ensureFunc EnsureFunc, err error) {
return ensure, err
}
}
func QueriersToEnsureFunc(monitor mntr.Monitor, infoLogs bool, queriers []QueryFunc, k8sClient kubernetes.ClientInt, queried map[string]interface{}) (EnsureFunc, error) {
if infoLogs {
monitor.Info("querying...")
} else {
monitor.Debug("querying...")
}
ensurers := make([]EnsureFunc, 0)
for _, querier := range queriers {
ensurer, err := querier(k8sClient, queried)
if err != nil {
return nil, fmt.Errorf("error while querying: %w", err)
}
ensurers = append(ensurers, ensurer)
}
if infoLogs {
monitor.Info("queried")
} else {
monitor.Debug("queried")
}
return func(k8sClient kubernetes.ClientInt) error {
if infoLogs {
monitor.Info("ensuring...")
} else {
monitor.Debug("ensuring...")
}
for _, ensurer := range ensurers {
if err := ensurer(k8sClient); err != nil {
return fmt.Errorf("error while ensuring: %w", err)
}
}
if infoLogs {
monitor.Info("ensured")
} else {
monitor.Debug("ensured")
}
return nil
}, nil
}
func DestroyersToDestroyFunc(monitor mntr.Monitor, destroyers []DestroyFunc) DestroyFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("destroying...")
for _, destroyer := range destroyers {
if err := destroyer(k8sClient); err != nil {
return fmt.Errorf("error while destroying: %w", err)
}
}
monitor.Info("destroyed")
return nil
}
}
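
operator.go wires the whole reconciliation pipeline as closures: a QueryFunc inspects state and returns an EnsureFunc, and QueriersToEnsureFunc chains many queriers into a single ensurer that only runs once every query has succeeded. A self-contained sketch of that two-phase flow, with kubernetes.ClientInt replaced by a placeholder interface:

// Minimal sketch of the query/ensure pipeline from the deleted operator
// package: phase 1 queries desired state and returns closures, phase 2
// runs all closures in order. Client stands in for kubernetes.ClientInt.
package main

import "fmt"

type Client interface{}

type EnsureFunc func(c Client) error
type QueryFunc func(c Client, queried map[string]interface{}) (EnsureFunc, error)

func queriersToEnsure(queriers []QueryFunc, c Client, queried map[string]interface{}) (EnsureFunc, error) {
	ensurers := make([]EnsureFunc, 0, len(queriers))
	for _, q := range queriers {
		e, err := q(c, queried)
		if err != nil {
			return nil, fmt.Errorf("error while querying: %w", err)
		}
		ensurers = append(ensurers, e)
	}
	return func(c Client) error {
		for _, e := range ensurers {
			if err := e(c); err != nil {
				return fmt.Errorf("error while ensuring: %w", err)
			}
		}
		return nil
	}, nil
}

func main() {
	q := QueryFunc(func(c Client, _ map[string]interface{}) (EnsureFunc, error) {
		return func(Client) error { fmt.Println("ensured resource"); return nil }, nil
	})
	ensure, err := queriersToEnsure([]QueryFunc{q}, nil, map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	_ = ensure(nil)
}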

View File

@@ -1,46 +0,0 @@
package core
import (
"errors"
"github.com/caos/orbos/pkg/tree"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func UnmarshalUnstructuredSpec(unstruct *unstructured.Unstructured) (*tree.Tree, error) {
spec, found := unstruct.Object["spec"]
if !found {
return nil, errors.New("no spec in crd")
}
specMap, ok := spec.(map[string]interface{})
if !ok {
return nil, errors.New("no spec in crd")
}
data, err := yaml.Marshal(specMap)
if err != nil {
return nil, err
}
desired := &tree.Tree{}
if err := yaml.Unmarshal(data, &desired); err != nil {
return nil, err
}
return desired, nil
}
func MarshalToUnstructuredSpec(t *tree.Tree) (*unstructured.Unstructured, error) {
data, err := yaml.Marshal(t)
if err != nil {
return nil, err
}
unstruct := &unstructured.Unstructured{
Object: map[string]interface{}{
"spec": make(map[string]interface{}),
},
}
return unstruct, yaml.Unmarshal(data, unstruct.Object["spec"])
}
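
These two helpers round-trip a configuration tree through the spec field of an unstructured CRD object. A self-contained sketch of the same idea, using a plain struct instead of the orbos tree.Tree:

// Sketch of the spec round-trip: marshal a typed spec under .spec,
// then extract .spec and decode it back into the typed form.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type spec struct {
	Kind    string `yaml:"kind"`
	Version string `yaml:"version"`
}

func main() {
	in := spec{Kind: "databases.caos.ch/BucketBackup", Version: "v0"}

	// As MarshalToUnstructuredSpec does with a tree.Tree.
	data, err := yaml.Marshal(in)
	if err != nil {
		panic(err)
	}
	specMap := map[string]interface{}{}
	if err := yaml.Unmarshal(data, &specMap); err != nil {
		panic(err)
	}
	object := map[string]interface{}{"spec": specMap}

	// As UnmarshalUnstructuredSpec does: pull .spec back out.
	raw, err := yaml.Marshal(object["spec"])
	if err != nil {
		panic(err)
	}
	var out spec
	if err := yaml.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out) // {Kind:databases.caos.ch/BucketBackup Version:v0}
}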

View File

@@ -1,44 +0,0 @@
package database
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/core"
databasev1 "github.com/caos/zitadel/operator/api/database/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
const (
Namespace = "caos-system"
kind = "Database"
apiVersion = "caos.ch/v1"
Name = "database"
)
func ReadCrd(k8sClient kubernetes.ClientInt) (*tree.Tree, error) {
unstruct, err := k8sClient.GetNamespacedCRDResource(databasev1.GroupVersion.Group, databasev1.GroupVersion.Version, kind, Namespace, Name)
if err != nil {
if macherrs.IsNotFound(err) || meta.IsNoMatchError(err) {
return nil, nil
}
return nil, err
}
return core.UnmarshalUnstructuredSpec(unstruct)
}
func WriteCrd(k8sClient kubernetes.ClientInt, t *tree.Tree) error {
unstruct, err := core.MarshalToUnstructuredSpec(t)
if err != nil {
return err
}
unstruct.SetName(Name)
unstruct.SetNamespace(Namespace)
unstruct.SetKind(kind)
unstruct.SetAPIVersion(apiVersion)
return k8sClient.ApplyNamespacedCRDResource(databasev1.GroupVersion.Group, databasev1.GroupVersion.Version, kind, Namespace, Name, unstruct)
}

View File

@@ -1,57 +0,0 @@
// +kubebuilder:object:generate=true
// +groupName=caos.ch
package v1
import (
"github.com/caos/orbos/pkg/tree"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "caos.ch", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:crd=Database
type Database struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec Spec `json:"spec,omitempty"`
Status Status `json:"status,omitempty"`
}
type Status struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
type Spec struct {
Common *tree.Common `json:",inline" yaml:",inline"`
Spec *orbdb.Spec `json:"spec" yaml:"spec"`
Database *Empty `json:"database" yaml:"database"`
}
type Empty struct{}
// +kubebuilder:object:root=true
type DatabaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Database `json:"items"`
}
func init() {
SchemeBuilder.Register(&Database{}, &DatabaseList{})
}

View File

@@ -1,146 +0,0 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/orb"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
func (in *Database) DeepCopy() *Database {
if in == nil {
return nil
}
out := new(Database)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Database) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatabaseList) DeepCopyInto(out *DatabaseList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Database, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList.
func (in *DatabaseList) DeepCopy() *DatabaseList {
if in == nil {
return nil
}
out := new(DatabaseList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DatabaseList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Empty) DeepCopyInto(out *Empty) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Empty.
func (in *Empty) DeepCopy() *Empty {
if in == nil {
return nil
}
out := new(Empty)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.Common != nil {
in, out := &in.Common, &out.Common
*out = new(tree.Common)
**out = **in
}
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(orb.Spec)
(*in).DeepCopyInto(*out)
}
if in.Database != nil {
in, out := &in.Database, &out.Database
*out = new(Empty)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
if in == nil {
return nil
}
out := new(Status)
in.DeepCopyInto(out)
return out
}

View File

@@ -1,44 +0,0 @@
package zitadel
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/core"
zitadelv1 "github.com/caos/zitadel/operator/api/zitadel/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
const (
Namespace = "caos-system"
kind = "Zitadel"
apiVersion = "caos.ch/v1"
Name = "zitadel"
)
func ReadCrd(k8sClient kubernetes.ClientInt) (*tree.Tree, error) {
unstruct, err := k8sClient.GetNamespacedCRDResource(zitadelv1.GroupVersion.Group, zitadelv1.GroupVersion.Version, kind, Namespace, Name)
if err != nil {
if macherrs.IsNotFound(err) || meta.IsNoMatchError(err) {
return nil, nil
}
return nil, err
}
return core.UnmarshalUnstructuredSpec(unstruct)
}
func WriteCrd(k8sClient kubernetes.ClientInt, t *tree.Tree) error {
unstruct, err := core.MarshalToUnstructuredSpec(t)
if err != nil {
return err
}
unstruct.SetName(Name)
unstruct.SetNamespace(Namespace)
unstruct.SetKind(kind)
unstruct.SetAPIVersion(apiVersion)
return k8sClient.ApplyNamespacedCRDResource(zitadelv1.GroupVersion.Group, zitadelv1.GroupVersion.Version, kind, Namespace, Name, unstruct)
}

View File

@@ -1,57 +0,0 @@
// +kubebuilder:object:generate=true
// +groupName=caos.ch
package v1
import (
"github.com/caos/orbos/pkg/tree"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "caos.ch", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:crd=Zitadel
type Zitadel struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec Spec `json:"spec,omitempty"`
Status Status `json:"status,omitempty"`
}
type Status struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
type Spec struct {
Common *tree.Common `json:",inline" yaml:",inline"`
Spec *orbz.Spec `json:"spec" yaml:"spec"`
IAM *Empty `json:"iam" yaml:"iam"`
}
type Empty struct{}
// +kubebuilder:object:root=true
type ZitadelList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Zitadel `json:"items"`
}
func init() {
SchemeBuilder.Register(&Zitadel{}, &ZitadelList{})
}

View File

@@ -1,146 +0,0 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Empty) DeepCopyInto(out *Empty) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Empty.
func (in *Empty) DeepCopy() *Empty {
if in == nil {
return nil
}
out := new(Empty)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.Common != nil {
in, out := &in.Common, &out.Common
*out = new(tree.Common)
**out = **in
}
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(orb.Spec)
(*in).DeepCopyInto(*out)
}
if in.IAM != nil {
in, out := &in.IAM, &out.IAM
*out = new(Empty)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
if in == nil {
return nil
}
out := new(Status)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Zitadel) DeepCopyInto(out *Zitadel) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zitadel.
func (in *Zitadel) DeepCopy() *Zitadel {
if in == nil {
return nil
}
out := new(Zitadel)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Zitadel) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ZitadelList) DeepCopyInto(out *ZitadelList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Zitadel, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelList.
func (in *ZitadelList) DeepCopy() *ZitadelList {
if in == nil {
return nil
}
out := new(ZitadelList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ZitadelList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

View File

@@ -1,46 +0,0 @@
package common
type image string
func (i image) String() string { return string(i) }
type dockerhubImage image
type zitadelImage image
const (
CockroachImage dockerhubImage = "cockroachdb/cockroach:v21.2.5"
PostgresImage dockerhubImage = "postgres:9.6.17"
FlywayImage dockerhubImage = "flyway/flyway:8.0.2"
AlpineImage dockerhubImage = "alpine:3.11"
ZITADELImage zitadelImage = "caos/zitadel"
BackupImage zitadelImage = "caos/zitadel-crbackup"
ZITADELOperatorImage zitadelImage = "caos/zitadel-operator"
)
func (z zitadelImage) Reference(customImageRegistry, version string) string {
reg := "ghcr.io"
if customImageRegistry != "" {
reg = customImageRegistry
}
return concat(image(z), reg, version)
}
func (d dockerhubImage) Reference(customImageRegistry string) string {
return concat(image(d), customImageRegistry, "")
}
func concat(img image, customImageRegistry, version string) string {
str := img.String()
if customImageRegistry != "" {
str = customImageRegistry + "/" + str
}
if version != "" {
str = str + ":" + version
}
return str
}
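
Reference and concat assemble [registry/]image[:version] strings: ZITADEL images default to ghcr.io and get a version tag appended, while Docker Hub images carry their tag in the name and get no default registry. A small sketch of the resulting references:

// Sketch reproducing concat()'s output for the two image families.
package main

import "fmt"

func concat(img, registry, version string) string {
	if registry != "" {
		img = registry + "/" + img
	}
	if version != "" {
		img = img + ":" + version
	}
	return img
}

func main() {
	// zitadelImage: ghcr.io is the default registry, version appended.
	fmt.Println(concat("caos/zitadel-operator", "ghcr.io", "v1.0.0")) // ghcr.io/caos/zitadel-operator:v1.0.0
	// dockerhubImage: tag already part of the name, no default registry.
	fmt.Println(concat("cockroachdb/cockroach:v21.2.5", "", "")) // cockroachdb/cockroach:v21.2.5
	// A custom registry is prepended in both cases.
	fmt.Println(concat("cockroachdb/cockroach:v21.2.5", "myreg.io", "")) // myreg.io/cockroachdb/cockroach:v21.2.5
}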

View File

@@ -1,106 +0,0 @@
package common
import (
"errors"
"fmt"
"strings"
"testing"
)
func TestDockerHubReference(t *testing.T) {
imgs := []dockerhubImage{CockroachImage, PostgresImage, FlywayImage, AlpineImage}
type args struct {
customImageRegistry string
}
tests := []struct {
name string
args args
test func(result string) error
}{{
name: "Image should be pulled from docker hub by default",
args: args{
customImageRegistry: "",
},
test: func(result string) error {
return expectRegistry(result, "")
},
}, {
name: "Given a custom image registry, the registry should be prepended",
args: args{
customImageRegistry: "myreg.io",
},
test: func(result string) error {
return expectRegistry(result, "myreg.io")
},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for i := range imgs {
got := imgs[i].Reference(tt.args.customImageRegistry)
if err := tt.test(got); err != nil {
t.Error(fmt.Errorf("DockerHubReference(%s): %w", imgs[i], err))
}
}
})
}
}
func TestZITADELReference(t *testing.T) {
imgs := []zitadelImage{ZITADELImage, BackupImage}
dummyVersion := "v99.99.99"
type args struct {
customImageRegistry string
}
tests := []struct {
name string
args args
test func(result string) error
}{{
name: "Image should be pulled from GHCR by default",
args: args{
customImageRegistry: "",
},
test: func(result string) error {
return expectRegistry(result, "ghcr.io/")
},
}, {
name: "Given a random docker hub image and a custom image registry, the registry should be prepended",
args: args{
customImageRegistry: "myreg.io",
},
test: func(result string) error {
return expectRegistry(result, "myreg.io")
},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
for i := range imgs {
got := imgs[i].Reference(tt.args.customImageRegistry, dummyVersion)
if err := tt.test(got); err != nil {
t.Error(fmt.Errorf("ZITADELReference(%s): %w", imgs[i], err))
}
}
})
}
}
func expectRegistry(result, expect string) error {
if !strings.HasPrefix(result, expect) {
return fmt.Errorf("image is not prefixed by the registry %s", expect)
}
points := strings.Count(result[:strings.Index(result, ":")], ".")
if expect == "" && points > 1 {
return errors.New("doesn't look like a docker image")
}
if points > 1 {
return errors.New("too many points")
}
return nil
}

View File

@@ -1,25 +0,0 @@
package common
import (
"bytes"
"gopkg.in/yaml.v3"
)
func MarshalYAML(sth interface{}) []byte {
if sth == nil {
return nil
}
buf := new(bytes.Buffer)
encoder := yaml.NewEncoder(buf)
defer func() {
encoder.Close()
buf.Truncate(0)
}()
encoder.SetIndent(2)
if err := encoder.Encode(sth); err != nil {
panic(err)
}
return buf.Bytes()
}
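
MarshalYAML forces two-space indentation and panics on encoder errors. A usage sketch of the same helper; this version closes the encoder explicitly before reading the buffer, so no flushing subtleties arise:

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func marshalYAML(sth interface{}) []byte {
	if sth == nil {
		return nil
	}
	buf := new(bytes.Buffer)
	enc := yaml.NewEncoder(buf)
	enc.SetIndent(2)
	if err := enc.Encode(sth); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flush before reading the buffer
		panic(err)
	}
	return buf.Bytes()
}

func main() {
	fmt.Print(string(marshalYAML(map[string]interface{}{
		"spec": map[string]string{"kind": "Database"},
	})))
	// spec:
	//   kind: Database
}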

View File

@@ -1,27 +0,0 @@
package crtlcrd
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/pkg/databases"
)
func Restore(
monitor mntr.Monitor,
k8sClient *kubernetes.Client,
backup string,
) error {
if err := databases.CrdClear(monitor, k8sClient); err != nil {
return err
}
if err := databases.CrdRestore(
monitor,
k8sClient,
backup,
); err != nil {
return err
}
return nil
}

View File

@@ -1,80 +0,0 @@
package crtlcrd
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
databasev1 "github.com/caos/zitadel/operator/api/database/v1"
zitadelv1 "github.com/caos/zitadel/operator/api/zitadel/v1"
"github.com/caos/zitadel/operator/crtlcrd/database"
"github.com/caos/zitadel/operator/crtlcrd/zitadel"
)
const (
Database = "database"
Zitadel = "zitadel"
)
var (
scheme = runtime.NewScheme()
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = databasev1.AddToScheme(scheme)
_ = zitadelv1.AddToScheme(scheme)
}
func Start(monitor mntr.Monitor, version, metricsAddr string, features ...string) error {
cfg := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: false,
LeaderElectionID: "9adsd12l.caos.ch",
})
if err != nil {
return fmt.Errorf("unable to start manager: %w", err)
}
k8sClient, err := kubernetes.NewK8sClientWithConfig(monitor, cfg)
if err != nil {
return err
}
for _, feature := range features {
switch feature {
case Database:
if err = (&database.Reconciler{
ClientInt: k8sClient,
Monitor: monitor,
Scheme: mgr.GetScheme(),
Version: version,
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller: %w", err)
}
case Zitadel:
if err = (&zitadel.Reconciler{
ClientInt: k8sClient,
Monitor: monitor,
Scheme: mgr.GetScheme(),
Version: version,
}).SetupWithManager(mgr); err != nil {
return fmt.Errorf("unable to create controller: %w", err)
}
}
}
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
return fmt.Errorf("problem running manager: %w", err)
}
return nil
}

View File

@@ -1,68 +0,0 @@
package database
import (
"context"
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/database"
v1 "github.com/caos/zitadel/operator/api/database/v1"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
type Reconciler struct {
kubernetes.ClientInt
Monitor mntr.Monitor
Scheme *runtime.Scheme
Version string
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
internalMonitor := r.Monitor.WithFields(map[string]interface{}{
"kind": "database",
"namespace": req.NamespacedName,
})
defer func() {
r.Monitor.Error(err)
}()
if req.Namespace != database.Namespace || req.Name != database.Name {
return res, fmt.Errorf("resource must be named %s and namespaced in %s", database.Name, database.Namespace)
}
desired, err := database.ReadCrd(r.ClientInt)
if err != nil {
internalMonitor.Error(err)
return res, err
}
query, _, _, _, _, _, err := orbdb.AdaptFunc("", &r.Version, false, "database")(internalMonitor, desired, &tree.Tree{})
if err != nil {
internalMonitor.Error(err)
return res, err
}
ensure, err := query(r.ClientInt, map[string]interface{}{})
if err != nil {
internalMonitor.Error(err)
return res, err
}
if err := ensure(r.ClientInt); err != nil {
internalMonitor.Error(err)
return res, err
}
return res, nil
}
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1.Database{}).
Complete(r)
}

View File

@@ -1,26 +0,0 @@
package database
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/database"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
)
func Destroy(monitor mntr.Monitor, k8sClient kubernetes.ClientInt, version string) error {
desired, err := database.ReadCrd(k8sClient)
if err != nil {
return err
}
if desired != nil {
_, destroy, _, _, _, _, err := orbdb.AdaptFunc("", &version, false, "database")(monitor, desired, &tree.Tree{})
if err != nil {
return err
}
return destroy(k8sClient)
}
return nil
}

View File

@@ -1,24 +0,0 @@
package crtlcrd
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/crtlcrd/database"
"github.com/caos/zitadel/operator/crtlcrd/zitadel"
)
func Destroy(monitor mntr.Monitor, k8sClient kubernetes.ClientInt, version string, features ...string) error {
for _, feature := range features {
switch feature {
case Zitadel:
if err := zitadel.Destroy(monitor, k8sClient, version); err != nil {
return err
}
case Database:
if err := database.Destroy(monitor, k8sClient, version); err != nil {
return err
}
}
}
return nil
}

View File

@@ -1,26 +0,0 @@
package zitadel
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/zitadel"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
)
func Destroy(monitor mntr.Monitor, k8sClient kubernetes.ClientInt, version string) error {
desired, err := zitadel.ReadCrd(k8sClient)
if err != nil {
return err
}
if desired != nil {
_, destroy, _, _, _, _, err := orbz.AdaptFunc(nil, "ensure", &version, false, []string{"operator", "iam"})(monitor, desired, &tree.Tree{})
if err != nil {
return err
}
return destroy(k8sClient)
}
return nil
}

View File

@@ -1,32 +0,0 @@
package zitadel
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
macherrs "k8s.io/apimachinery/pkg/api/errors"
)
func ScaleDown(
monitor mntr.Monitor,
k8sClient *kubernetes.Client,
version *string,
) (bool, error) {
noZitadel := false
if err := Takeoff(monitor, k8sClient, orb.AdaptFunc(nil, "scaledown", version, false, []string{"scaledown"})); err != nil {
if macherrs.IsNotFound(err) {
noZitadel = true
} else {
return noZitadel, err
}
}
return noZitadel, nil
}
func ScaleUp(
monitor mntr.Monitor,
k8sClient *kubernetes.Client,
version *string,
) error {
return Takeoff(monitor, k8sClient, orb.AdaptFunc(nil, "scaleup", version, false, []string{"scaleup"}))
}

View File

@@ -1,77 +0,0 @@
package zitadel
import (
"context"
"fmt"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/zitadel"
v1 "github.com/caos/zitadel/operator/api/zitadel/v1"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
type Reconciler struct {
kubernetes.ClientInt
Monitor mntr.Monitor
Scheme *runtime.Scheme
Version string
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
internalMonitor := r.Monitor.WithFields(map[string]interface{}{
"kind": "zitadel",
"namespace": req.NamespacedName,
})
defer func() {
r.Monitor.Error(err)
}()
if req.Namespace != zitadel.Namespace || req.Name != zitadel.Name {
return res, fmt.Errorf("resource must be named %s and namespaced in %s", zitadel.Name, zitadel.Namespace)
}
if err := Takeoff(internalMonitor, r.ClientInt, orbz.AdaptFunc(nil, "ensure", &r.Version, false, []string{"operator", "iam"})); err != nil {
return res, err
}
return res, nil
}
func Takeoff(
monitor mntr.Monitor,
k8sClient kubernetes.ClientInt,
adaptFunc operator.AdaptFunc,
) error {
desired, err := zitadel.ReadCrd(k8sClient)
if err != nil {
return err
}
query, _, _, _, _, _, err := adaptFunc(monitor, desired, &tree.Tree{})
if err != nil {
return err
}
ensure, err := query(k8sClient, map[string]interface{}{})
if err != nil {
return err
}
if err := ensure(k8sClient); err != nil {
return err
}
return nil
}
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1.Zitadel{}).
Complete(r)
}

View File

@@ -1,29 +0,0 @@
package crtlgitops
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/pkg/databases"
)
func Restore(
monitor mntr.Monitor,
gitClient *git.Client,
k8sClient *kubernetes.Client,
backup string,
) error {
if err := databases.GitOpsClear(monitor, k8sClient, gitClient); err != nil {
return err
}
if err := databases.GitOpsRestore(
monitor,
k8sClient,
gitClient,
backup,
); err != nil {
return err
}
return nil
}

View File

@@ -1,44 +0,0 @@
package crtlgitops
import (
"context"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
orbconfig "github.com/caos/orbos/pkg/orb"
"github.com/caos/zitadel/operator/database"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"github.com/caos/zitadel/operator/zitadel"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
)
func DestroyOperator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string, gitops bool) error {
orbConfig, err := orbconfig.ParseOrbConfig(orbConfigPath)
if err != nil {
return err
}
gitClient := git.New(context.Background(), monitor, "orbos", "orbos@caos.ch")
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
return err
}
return zitadel.Destroy(monitor, gitClient, orbz.AdaptFunc(orbConfig, "ensure", version, gitops, []string{"zitadel", "iam"}), k8sClient)
}
func DestroyDatabase(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string, gitops bool) error {
orbConfig, err := orbconfig.ParseOrbConfig(orbConfigPath)
if err != nil {
return err
}
gitClient := git.New(context.Background(), monitor, "orbos", "orbos@caos.ch")
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
return err
}
return database.Destroy(monitor, gitClient, orbdb.AdaptFunc("", version, gitops, "operator", "database", "backup"), k8sClient)
}

View File

@@ -1,41 +0,0 @@
package crtlgitops
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
orbconfig "github.com/caos/orbos/pkg/orb"
"github.com/caos/zitadel/operator/zitadel"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
macherrs "k8s.io/apimachinery/pkg/api/errors"
)
func ScaleDown(
monitor mntr.Monitor,
gitClient *git.Client,
k8sClient *kubernetes.Client,
orbCfg *orbconfig.Orb,
version *string,
gitops bool,
) (bool, error) {
noZitadel := false
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaledown", version, gitops, []string{"scaledown"}), k8sClient)(); err != nil {
if macherrs.IsNotFound(err) {
noZitadel = true
} else {
return noZitadel, err
}
}
return noZitadel, nil
}
func ScaleUp(
monitor mntr.Monitor,
gitClient *git.Client,
k8sClient *kubernetes.Client,
orbCfg *orbconfig.Orb,
version *string,
gitops bool,
) error {
return zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaleup", version, gitops, []string{"scaleup"}), k8sClient)()
}

View File

@@ -1,90 +0,0 @@
package crtlgitops
import (
"context"
"time"
"github.com/caos/zitadel/operator/database"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"github.com/caos/zitadel/operator/zitadel"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
orbconfig "github.com/caos/orbos/pkg/orb"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
)
func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string, gitops bool) error {
takeoffChan := make(chan struct{})
go func() {
takeoffChan <- struct{}{}
}()
for range takeoffChan {
orbConfig, err := orbconfig.ParseOrbConfig(orbConfigPath)
if err != nil {
monitor.Error(err)
return err
}
gitClient := git.New(context.Background(), monitor, "orbos", "orbos@caos.ch")
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
monitor.Error(err)
return err
}
takeoff := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbConfig, "ensure", version, gitops, []string{"operator", "iam"}), k8sClient)
go func() {
started := time.Now()
takeoff()
monitor.WithFields(map[string]interface{}{
"took": time.Since(started),
}).Info("Iteration done")
time.Sleep(time.Second * 10)
takeoffChan <- struct{}{}
}()
}
return nil
}
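// Database runs the same reconciliation loop for the database operator,
// covering the operator, database and backup features.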
func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, binaryVersion *string, gitops bool) error {
takeoffChan := make(chan struct{})
go func() {
takeoffChan <- struct{}{}
}()
for range takeoffChan {
orbConfig, err := orbconfig.ParseOrbConfig(orbConfigPath)
if err != nil {
monitor.Error(err)
return err
}
gitClient := git.New(context.Background(), monitor, "orbos", "orbos@caos.ch")
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
monitor.Error(err)
return err
}
takeoff := database.Takeoff(monitor, gitClient, orbdb.AdaptFunc("", binaryVersion, gitops, "operator", "database", "backup"), k8sClient)
go func() {
started := time.Now()
takeoff()
monitor.WithFields(map[string]interface{}{
"took": time.Since(started),
}).Info("Iteration done")
time.Sleep(time.Second * 10)
takeoffChan <- struct{}{}
}()
}
return nil
}

View File

@@ -1,34 +0,0 @@
package database
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
)
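// Destroy parses the desired state from database.yml in the git repository
// and runs the destroy function returned by the given adapt function.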
func Destroy(
monitor mntr.Monitor,
gitClient *git.Client,
adapt operator.AdaptFunc,
k8sClient *kubernetes.Client,
) error {
internalMonitor := monitor.WithField("operator", "database")
internalMonitor.Info("Destroy")
treeDesired, err := operator.Parse(gitClient, "database.yml")
if err != nil {
return err
}
treeCurrent := &tree.Tree{}
_, destroy, _, _, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
if err != nil {
return err
}
if err := destroy(k8sClient); err != nil {
return err
}
return nil
}

View File

@@ -1,107 +0,0 @@
package backups
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket"
"github.com/caos/zitadel/operator/database/kinds/backups/s3"
corev1 "k8s.io/api/core/v1"
)
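// Adapt dispatches to the backup kind referenced in the desired tree,
// currently BucketBackup (Google Cloud Storage) or S3Backup.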
func Adapt(
monitor mntr.Monitor,
desiredTree *tree.Tree,
currentTree *tree.Tree,
name string,
namespace string,
componentLabels *labels.Component,
checkDBReady operator.EnsureFunc,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
version string,
dbURL string,
dbPort int32,
features []string,
customImageRegistry string,
) (
operator.QueryFunc,
operator.DestroyFunc,
operator.ConfigureFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
switch desiredTree.Common.Kind {
case "databases.caos.ch/BucketBackup":
return bucket.AdaptFunc(
name,
namespace,
labels.MustForComponent(
labels.MustReplaceAPI(
labels.GetAPIFromComponent(componentLabels),
"BucketBackup",
desiredTree.Common.Version(),
),
"backup"),
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
customImageRegistry,
)(monitor, desiredTree, currentTree)
case "databases.caos.ch/S3Backup":
return s3.AdaptFunc(
name,
namespace,
labels.MustForComponent(
labels.MustReplaceAPI(
labels.GetAPIFromComponent(componentLabels),
"S3Backup",
desiredTree.Common.Version(),
),
"backup"),
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
customImageRegistry,
)(monitor, desiredTree, currentTree)
default:
return nil, nil, nil, nil, nil, false, mntr.ToUserError(fmt.Errorf("unknown database kind %s", desiredTree.Common.Kind))
}
}
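// GetBackupList returns the names of the existing backups for the
// backup kind referenced in the desired tree.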
func GetBackupList(
monitor mntr.Monitor,
k8sClient kubernetes.ClientInt,
name string,
desiredTree *tree.Tree,
) (
[]string,
error,
) {
switch desiredTree.Common.Kind {
case "databases.caos.ch/BucketBackup":
return bucket.BackupList()(monitor, k8sClient, name, desiredTree)
case "databases.caos.ch/S3Backup":
return s3.BackupList()(monitor, k8sClient, name, desiredTree)
default:
return nil, mntr.ToUserError(fmt.Errorf("unknown database kind %s", desiredTree.Common.Kind))
}
}

View File

@@ -1,279 +0,0 @@
package bucket
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/secret"
"github.com/caos/orbos/pkg/labels"
secretpkg "github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/secret/read"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/common"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
)
const (
secretName = "backup-serviceaccountjson"
secretKey = "serviceaccountjson"
)
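// AdaptFunc wires the query, destroy and configure functions for bucket
// backups: it ensures the service account secret and, depending on the
// requested features, the backup cron job, instant backup job or restore job.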
func AdaptFunc(
name string,
namespace string,
componentLabels *labels.Component,
checkDBReady operator.EnsureFunc,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
version string,
dbURL string,
dbPort int32,
features []string,
customImageRegistry string,
) operator.AdaptFunc {
return func(
monitor mntr.Monitor,
desired *tree.Tree,
current *tree.Tree,
) (
operator.QueryFunc,
operator.DestroyFunc,
operator.ConfigureFunc,
map[string]*secretpkg.Secret,
map[string]*secretpkg.Existing,
bool,
error,
) {
internalMonitor := monitor.WithField("component", "backup")
desiredKind, err := ParseDesiredV0(desired)
if err != nil {
return nil, nil, nil, nil, nil, false, fmt.Errorf("parsing desired state failed: %w", err)
}
desired.Parsed = desiredKind
secrets, existing := getSecretsMap(desiredKind)
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
destroyS, err := secret.AdaptFuncToDestroy(namespace, secretName)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
image := common.BackupImage.Reference(customImageRegistry, version)
_, destroyB, err := backup.AdaptFunc(
internalMonitor,
name,
namespace,
componentLabels,
checkDBReady,
desiredKind.Spec.Bucket,
desiredKind.Spec.Cron,
secretName,
secretKey,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
_, destroyR, err := restore.AdaptFunc(
monitor,
name,
namespace,
componentLabels,
desiredKind.Spec.Bucket,
timestamp,
nodeselector,
tolerations,
checkDBReady,
secretName,
secretKey,
dbURL,
dbPort,
image,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
/*_, destroyC, err := clean.AdaptFunc(
monitor,
name,
namespace,
componentLabels,
[]string{},
[]string{},
nodeselector,
tolerations,
checkDBReady,
secretName,
secretKey,
image,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}*/
destroyers := make([]operator.DestroyFunc, 0)
for _, feature := range features {
switch feature {
case backup.Normal, backup.Instant:
destroyers = append(destroyers,
operator.ResourceDestroyToZitadelDestroy(destroyS),
destroyB,
)
/*case clean.Instant:
destroyers = append(destroyers,
destroyC,
)*/
case restore.Instant:
destroyers = append(destroyers,
destroyR,
)
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
if err := desiredKind.validateSecrets(); err != nil {
return nil, err
}
value, err := read.GetSecretValue(k8sClient, desiredKind.Spec.ServiceAccountJSON, desiredKind.Spec.ExistingServiceAccountJSON)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: value})
if err != nil {
return nil, err
}
queryB, _, err := backup.AdaptFunc(
internalMonitor,
name,
namespace,
componentLabels,
checkDBReady,
desiredKind.Spec.Bucket,
desiredKind.Spec.Cron,
secretName,
secretKey,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
if err != nil {
return nil, err
}
queryR, _, err := restore.AdaptFunc(
monitor,
name,
namespace,
componentLabels,
desiredKind.Spec.Bucket,
timestamp,
nodeselector,
tolerations,
checkDBReady,
secretName,
secretKey,
dbURL,
dbPort,
image,
)
if err != nil {
return nil, err
}
/*queryC, _, err := clean.AdaptFunc(
monitor,
name,
namespace,
componentLabels,
databases,
users,
nodeselector,
tolerations,
checkDBReady,
secretName,
secretKey,
image,
)
if err != nil {
return nil, err
}*/
queriers := make([]operator.QueryFunc, 0)
cleanupQueries := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case backup.Normal:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryB,
)
case backup.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryB,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(backup.GetCleanupFunc(monitor, namespace, name)),
)
/*case clean.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryC,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(clean.GetCleanupFunc(monitor, namespace, name)),
)*/
case restore.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryR,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(restore.GetCleanupFunc(monitor, namespace, name)),
)
}
}
for _, cleanup := range cleanupQueries {
queriers = append(queriers, cleanup)
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
func(kubernetes.ClientInt, map[string]interface{}, bool) error { return nil },
secrets,
existing,
false,
nil
}
}

View File

@@ -1,394 +0,0 @@
package bucket
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
func TestBucket_Secrets(t *testing.T) {
masterkey := "testMk"
features := []string{backup.Normal}
saJson := "testSA"
bucketName := "testBucket2"
cron := "testCron2"
monitor := mntr.Monitor{}
namespace := "testNs2"
dbURL := "testDB"
dbPort := int32(80)
kindVersion := "v0"
kind := "BucketBackup"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", kindVersion), "testComponent")
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/"+kind, kindVersion, false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
allSecrets := map[string]string{
"serviceaccountjson": saJson,
}
_, _, _, secrets, existing, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
for key, value := range allSecrets {
assert.Contains(t, secrets, key)
assert.Contains(t, existing, key)
assert.Equal(t, value, secrets[key].Value)
}
}
func TestBucket_AdaptBackup(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{backup.Normal}
saJson := "testSA"
dbURL := "testDB"
dbPort := int32(80)
bucketName := "testBucket2"
cron := "testCron2"
monitor := mntr.Monitor{}
namespace := "testNs2"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetBackup(client, namespace, k8sLabels, saJson)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, []string{})
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(client))
}
func TestBucket_AdaptInstantBackup(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{backup.Instant}
bucketName := "testBucket1"
cron := "testCron"
monitor := mntr.Monitor{}
namespace := "testNs"
dbURL := "testDB"
dbPort := int32(80)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
saJson := "testSA"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetInstantBackup(client, namespace, backupName, k8sLabels, saJson)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, []string{})
ensure, err := query(client, queried)
assert.NotNil(t, ensure)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBucket_AdaptRestore(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{restore.Instant}
bucketName := "testBucket1"
cron := "testCron"
monitor := mntr.Monitor{}
namespace := "testNs"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
saJson := "testSA"
dbURL := "testDB"
dbPort := int32(80)
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetRestore(client, namespace, backupName, k8sLabels, saJson)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, []string{})
ensure, err := query(client, queried)
assert.NotNil(t, ensure)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
/*
func TestBucket_AdaptClean(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{clean.Instant}
bucketName := "testBucket1"
cron := "testCron"
monitor := mntr.Monitor{}
namespace := "testNs"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
saJson := "testSA"
dbURL := "testDB"
dbPort := int32(80)
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetClean(client, namespace, backupName, k8sLabels, saJson)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
users := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, users)
ensure, err := query(client, queried)
assert.NotNil(t, ensure)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}*/

View File

@@ -1,140 +0,0 @@
package backup
import (
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/cronjob"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
"github.com/caos/orbos/pkg/labels"
corev1 "k8s.io/api/core/v1"
)
const (
defaultMode int32 = 256
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
backupPath = "/cockroach"
backupNameEnv = "BACKUP_NAME"
saJsonBase64Env = "SAJSON"
cronJobNamePrefix = "backup-"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout = 45 * time.Minute
Normal = "backup"
Instant = "instantbackup"
)
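// AdaptFunc builds the queriers and destroyers for the backup feature:
// a cron job for scheduled backups (Normal) or a one-off job (Instant),
// both running the cockroach BACKUP command against the database.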
func AdaptFunc(
monitor mntr.Monitor,
backupName string,
namespace string,
componentLabels *labels.Component,
checkDBReady operator.EnsureFunc,
bucketName string,
cron string,
secretName string,
secretKey string,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
dbURL string,
dbPort int32,
features []string,
image string,
) (
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
err error,
) {
command := getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
)
jobSpecDef := getJobSpecDef(
nodeselector,
tolerations,
secretName,
secretKey,
backupName,
command,
image,
)
destroyers := []operator.DestroyFunc{}
queriers := []operator.QueryFunc{}
cronJobDef := getCronJob(
namespace,
labels.MustForName(componentLabels, GetJobName(backupName)),
cron,
jobSpecDef,
)
destroyCJ, err := cronjob.AdaptFuncToDestroy(cronJobDef.Namespace, cronJobDef.Name)
if err != nil {
return nil, nil, err
}
queryCJ, err := cronjob.AdaptFuncToEnsure(cronJobDef)
if err != nil {
return nil, nil, err
}
jobDef := getJob(
namespace,
labels.MustForName(componentLabels, cronJobNamePrefix+backupName),
jobSpecDef,
)
destroyJ, err := job.AdaptFuncToDestroy(jobDef.Namespace, jobDef.Name)
if err != nil {
return nil, nil, err
}
queryJ, err := job.AdaptFuncToEnsure(jobDef)
if err != nil {
return nil, nil, err
}
for _, feature := range features {
switch feature {
case Normal:
destroyers = append(destroyers,
operator.ResourceDestroyToZitadelDestroy(destroyCJ),
)
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryCJ),
)
case Instant:
destroyers = append(destroyers,
operator.ResourceDestroyToZitadelDestroy(destroyJ),
)
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
)
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
return operator.QueriersToEnsureFunc(monitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(monitor, destroyers),
nil
}
func GetJobName(backupName string) string {
return cronJobNamePrefix + backupName
}

View File

@@ -1,324 +0,0 @@
package backup
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestBackup_AdaptInstantBackup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Instant}
monitor := mntr.Monitor{}
namespace := "testNs"
bucketName := "testBucket"
cron := "testCron"
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
image := "testImage"
secretKey := "testKey"
secretName := "testSecretName"
dbURL := "testDB"
dbPort := int32(80)
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
getJobSpecDef(
nodeselector,
tolerations,
secretName,
secretKey,
backupName,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
),
image,
),
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
secretName,
secretKey,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_AdaptInstantBackup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Instant}
monitor := mntr.Monitor{}
namespace := "testNs2"
dbURL := "testDB"
dbPort := int32(80)
bucketName := "testBucket2"
cron := "testCron2"
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
image := "testImage2"
secretKey := "testKey2"
secretName := "testSecretName2"
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
getJobSpecDef(
nodeselector,
tolerations,
secretName,
secretKey,
backupName,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
),
image,
),
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
secretName,
secretKey,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_AdaptBackup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Normal}
monitor := mntr.Monitor{}
namespace := "testNs"
bucketName := "testBucket"
cron := "testCron"
timestamp := "test"
dbURL := "testDB"
dbPort := int32(80)
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
image := "testImage"
secretKey := "testKey"
secretName := "testSecretName"
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getCronJob(
namespace,
nameLabels,
cron,
getJobSpecDef(
nodeselector,
tolerations,
secretName,
secretKey,
backupName,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
),
image,
),
)
client.EXPECT().ApplyCronJob(jobDef).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
secretName,
secretKey,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_AdaptBackup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Normal}
monitor := mntr.Monitor{}
namespace := "testNs2"
dbURL := "testDB"
dbPort := int32(80)
bucketName := "testBucket2"
cron := "testCron2"
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
image := "testImage2"
secretKey := "testKey2"
secretName := "testSecretName2"
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getCronJob(
namespace,
nameLabels,
cron,
getJobSpecDef(
nodeselector,
tolerations,
secretName,
secretKey,
backupName,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
),
image,
),
)
client.EXPECT().ApplyCronJob(jobDef).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
secretName,
secretKey,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}

View File

@@ -1,29 +0,0 @@
package backup
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator"
)
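// GetCleanupFunc waits until the backup job has completed and then
// deletes the job so that a subsequent backup can be scheduled.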
func GetCleanupFunc(
monitor mntr.Monitor,
namespace string,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for backup to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
return fmt.Errorf("error while waiting for backup to be completed: %w", err)
}
monitor.Info("backup is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
return fmt.Errorf("error while trying to cleanup backup: %w", err)
}
monitor.Info("cleanup backup is completed")
return nil
}
}

View File

@@ -1,42 +0,0 @@
package backup
import (
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
)
func TestBackup_Cleanup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test"
namespace := "testNs"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
func TestBackup_Cleanup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test2"
namespace := "testNs2"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@@ -1,41 +0,0 @@
package backup
import (
"strconv"
"strings"
)
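// getBackupCommand assembles the shell command executed in the backup
// container: it exports the backup name and the base64-encoded service
// account, then runs cockroach sql with a BACKUP statement targeting
// the gs:// bucket.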
func getBackupCommand(
timestamp string,
bucketName string,
backupName string,
certsFolder string,
serviceAccountPath string,
dbURL string,
dbPort int32,
) string {
backupCommands := make([]string, 0)
if timestamp != "" {
backupCommands = append(backupCommands, "export "+backupNameEnv+"="+timestamp)
} else {
backupCommands = append(backupCommands, "export "+backupNameEnv+"=$(date +%Y-%m-%dT%H:%M:%SZ)")
}
backupCommands = append(backupCommands, "export "+saJsonBase64Env+"=$(cat "+serviceAccountPath+" | base64 | tr -d '\n' )")
backupCommands = append(backupCommands,
strings.Join([]string{
"cockroach",
"sql",
"--certs-dir=" + certsFolder,
"--host=" + dbURL,
"--port=" + strconv.Itoa(int(dbPort)),
"-e",
"\"BACKUP TO \\\"gs://" + bucketName + "/" + backupName + "/${" + backupNameEnv + "}?AUTH=specified&CREDENTIALS=${" + saJsonBase64Env + "}\\\";\"",
}, " ",
),
)
return strings.Join(backupCommands, " && ")
}

View File

@@ -1,46 +0,0 @@
package backup
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Command1(t *testing.T) {
timestamp := ""
bucketName := "test"
backupName := "test"
dbURL := "testDB"
dbPort := int32(80)
cmd := getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
)
equals := "export " + backupNameEnv + "=$(date +%Y-%m-%dT%H:%M:%SZ) && export SAJSON=$(cat /secrets/sa.json | base64 | tr -d '\n' ) && cockroach sql --certs-dir=/cockroach/cockroach-certs --host=testDB --port=80 -e \"BACKUP TO \\\"gs://test/test/${BACKUP_NAME}?AUTH=specified&CREDENTIALS=${SAJSON}\\\";\""
assert.Equal(t, equals, cmd)
}
func TestBackup_Command2(t *testing.T) {
timestamp := "test"
bucketName := "test"
backupName := "test"
dbURL := "testDB"
dbPort := int32(80)
cmd := getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
)
equals := "export " + backupNameEnv + "=test && export SAJSON=$(cat /secrets/sa.json | base64 | tr -d '\n' ) && cockroach sql --certs-dir=/cockroach/cockroach-certs --host=testDB --port=80 -e \"BACKUP TO \\\"gs://test/test/${BACKUP_NAME}?AUTH=specified&CREDENTIALS=${SAJSON}\\\";\""
assert.Equal(t, equals, cmd)
}

View File

@@ -1,101 +0,0 @@
package backup
import (
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator/helpers"
batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func getCronJob(
namespace string,
nameLabels *labels.Name,
cron string,
jobSpecDef batchv1.JobSpec,
) *v1beta1.CronJob {
return &v1beta1.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: nameLabels.Name(),
Namespace: namespace,
Labels: labels.MustK8sMap(nameLabels),
},
Spec: v1beta1.CronJobSpec{
Schedule: cron,
ConcurrencyPolicy: v1beta1.ForbidConcurrent,
JobTemplate: v1beta1.JobTemplateSpec{
Spec: jobSpecDef,
},
},
}
}
func getJob(
namespace string,
nameLabels *labels.Name,
jobSpecDef batchv1.JobSpec,
) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: v1.ObjectMeta{
Name: nameLabels.Name(),
Namespace: namespace,
Labels: labels.MustK8sMap(nameLabels),
},
Spec: jobSpecDef,
}
}
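// getJobSpecDef defines the pod spec shared by the backup cron job and the
// one-off job: a single container mounting the client certificates and the
// service account secret.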
func getJobSpecDef(
nodeselector map[string]string,
tolerations []corev1.Toleration,
secretName string,
secretKey string,
backupName string,
command string,
image string,
) batchv1.JobSpec {
return batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: backupName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: secretKey,
SubPath: secretKey,
MountPath: secretPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: secretKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
}
}

View File

@@ -1,124 +0,0 @@
package backup
import (
"testing"
"github.com/caos/zitadel/operator/helpers"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
)
func TestBackup_JobSpec1(t *testing.T) {
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
command := "test"
secretKey := "testKey"
secretName := "testSecretName"
image := "testImage"
equals := batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: backupName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: secretKey,
SubPath: secretKey,
MountPath: secretPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: secretKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
}
assert.Equal(t, equals, getJobSpecDef(nodeselector, tolerations, secretName, secretKey, backupName, command, image))
}
func TestBackup_JobSpec2(t *testing.T) {
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
command := "test2"
secretKey := "testKey2"
secretName := "testSecretName2"
image := "testImage2"
equals := batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: backupName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: secretKey,
SubPath: secretKey,
MountPath: secretPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: secretKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
}
assert.Equal(t, equals, getJobSpecDef(nodeselector, tolerations, secretName, secretKey, backupName, command, image))
}

View File

@@ -1,52 +0,0 @@
package bucket
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
)
type DesiredV0 struct {
Common *tree.Common `yaml:",inline"`
Spec *Spec
}
type Spec struct {
Verbose bool
Cron string `yaml:"cron,omitempty"`
Bucket string `yaml:"bucket,omitempty"`
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ExistingServiceAccountJSON *secret.Existing `yaml:"existingServiceAccountJSON,omitempty"`
}
func (s *Spec) IsZero() bool {
if ((s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) && (s.ExistingServiceAccountJSON == nil || s.ExistingServiceAccountJSON.IsZero())) &&
!s.Verbose &&
s.Cron == "" &&
s.Bucket == "" {
return true
}
return false
}
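// ParseDesiredV0 decodes the desired tree into the v0 bucket backup kind.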
func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
desiredKind := &DesiredV0{
Common: desiredTree.Common,
Spec: &Spec{},
}
if err := desiredTree.Original.Decode(desiredKind); err != nil {
return nil, mntr.ToUserError(fmt.Errorf("parsing desired state failed: %w", err))
}
return desiredKind, nil
}
func (d *DesiredV0) validateSecrets() error {
if err := secret.ValidateSecret(d.Spec.ServiceAccountJSON, d.Spec.ExistingServiceAccountJSON); err != nil {
return fmt.Errorf("validating api key failed: %w", err)
}
return nil
}

View File

@@ -1,120 +0,0 @@
package bucket
import (
"testing"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
)
const (
masterkey = "testMk"
cron = "testCron"
bucketName = "testBucket"
saJson = "testSa"
yamlFile = `kind: databases.caos.ch/BucketBackup
version: v0
spec:
verbose: true
cron: testCron
bucket: testBucket
serviceAccountJSON:
encryption: AES256
encoding: Base64
value: luyAqtopzwLcaIhJj7KhWmbUsA7cQg==
`
yamlFileWithoutSecret = `kind: databases.caos.ch/BucketBackup
version: v0
spec:
verbose: true
cron: testCron
bucket: testBucket
`
yamlEmpty = `kind: databases.caos.ch/BucketBackup
version: v0`
)
var (
desired = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
Encryption: "AES256",
Encoding: "Base64",
},
},
}
desiredWithoutSecret = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
},
}
desiredEmpty = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: false,
Cron: "",
Bucket: "",
ServiceAccountJSON: &secret.Secret{
Value: "",
},
},
}
desiredNil = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
}
)
func marshalYaml(t *testing.T, masterkey string, struc *DesiredV0) []byte {
secret.Masterkey = masterkey
data, err := yaml.Marshal(struc)
assert.NoError(t, err)
return data
}
func unmarshalYaml(t *testing.T, masterkey string, yamlFile []byte) *tree.Tree {
secret.Masterkey = masterkey
desiredTree := &tree.Tree{}
assert.NoError(t, yaml.Unmarshal(yamlFile, desiredTree))
return desiredTree
}
func getDesiredTree(t *testing.T, masterkey string, desired *DesiredV0) *tree.Tree {
return unmarshalYaml(t, masterkey, marshalYaml(t, masterkey, desired))
}
func TestBucket_DesiredParse(t *testing.T) {
result := string(marshalYaml(t, masterkey, &desiredWithoutSecret))
assert.Equal(t, yamlFileWithoutSecret, result)
desiredTree := unmarshalYaml(t, masterkey, []byte(yamlFile))
desiredKind, err := ParseDesiredV0(desiredTree)
assert.NoError(t, err)
assert.Equal(t, &desired, desiredKind)
}
func TestBucket_DesiredNotZero(t *testing.T) {
desiredTree := unmarshalYaml(t, masterkey, []byte(yamlFile))
desiredKind, err := ParseDesiredV0(desiredTree)
assert.NoError(t, err)
assert.False(t, desiredKind.Spec.IsZero())
}
func TestBucket_DesiredZero(t *testing.T) {
desiredTree := unmarshalYaml(t, masterkey, []byte(yamlEmpty))
desiredKind, err := ParseDesiredV0(desiredTree)
assert.NoError(t, err)
assert.True(t, desiredKind.Spec.IsZero())
}

View File

@@ -1,76 +0,0 @@
package bucket
import (
"context"
"fmt"
"strings"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/secret/read"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/backups/core"
)
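// BackupList returns a function that reads the service account secret and
// lists the backup folders in the configured bucket.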
func BackupList() core.BackupListFunc {
return func(monitor mntr.Monitor, k8sClient kubernetes.ClientInt, name string, desired *tree.Tree) ([]string, error) {
desiredKind, err := ParseDesiredV0(desired)
if err != nil {
return nil, fmt.Errorf("parsing desired state failed: %w", err)
}
desired.Parsed = desiredKind
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
monitor.Verbose()
}
value, err := read.GetSecretValue(k8sClient, desiredKind.Spec.ServiceAccountJSON, desiredKind.Spec.ExistingServiceAccountJSON)
if err != nil {
return nil, err
}
return listFilesWithFilter(value, desiredKind.Spec.Bucket, name)
}
}
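// listFilesWithFilter lists the objects under the "<name>/" prefix and
// returns the distinct backup names, i.e. the second path segment.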
func listFilesWithFilter(serviceAccountJSON string, bucketName, name string) ([]string, error) {
ctx := context.Background()
client, err := storage.NewClient(ctx, option.WithCredentialsJSON([]byte(serviceAccountJSON)))
if err != nil {
	return nil, err
}
// release the client's connections once the listing is done
defer client.Close()
bkt := client.Bucket(bucketName)
names := make([]string, 0)
it := bkt.Objects(ctx, &storage.Query{Prefix: name + "/"})
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
return nil, err
}
parts := strings.Split(attrs.Name, "/")
// guard before indexing parts[1]; the original check sat inside the
// inner loop and never ran when names was still empty
if len(parts) < 2 {
	continue
}
found := false
for _, existing := range names {
	if existing == parts[1] {
		found = true
		break
	}
}
if !found {
	names = append(names, parts[1])
}
}
return names, nil
}

View File

@@ -1,84 +0,0 @@
package bucket
import (
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/caos/zitadel/operator/database/kinds/databases/core"
"github.com/golang/mock/gomock"
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
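// The helpers below prime the mocked kubernetes client with the calls
// expected for the secret, job and cron job resources in the tests.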
func SetQueriedForDatabases(databases, users []string) map[string]interface{} {
queried := map[string]interface{}{}
core.SetQueriedForDatabaseDBList(queried, databases, users)
return queried
}
func SetInstantBackup(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
backupName string,
labels map[string]string,
saJson string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: labels,
},
StringData: map[string]string{secretKey: saJson},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().GetJob(namespace, backup.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, backup.GetJobName(backupName)))
k8sClient.EXPECT().WaitUntilJobCompleted(namespace, backup.GetJobName(backupName), gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().DeleteJob(namespace, backup.GetJobName(backupName)).Times(1).Return(nil)
}
func SetBackup(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
labels map[string]string,
saJson string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: labels,
},
StringData: map[string]string{secretKey: saJson},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplyCronJob(gomock.Any()).Times(1).Return(nil)
}
func SetRestore(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
backupName string,
labels map[string]string,
saJson string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: labels,
},
StringData: map[string]string{secretKey: saJson},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().GetJob(namespace, restore.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, restore.GetJobName(backupName)))
k8sClient.EXPECT().WaitUntilJobCompleted(namespace, restore.GetJobName(backupName), gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().DeleteJob(namespace, restore.GetJobName(backupName)).Times(1).Return(nil)
}

View File

@@ -1,100 +0,0 @@
package restore
import (
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
"github.com/caos/orbos/pkg/labels"
corev1 "k8s.io/api/core/v1"
)
const (
Instant = "restore"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
jobPrefix = "backup-"
jobSuffix = "-restore"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout = 45 * time.Minute
saJsonBase64Env = "SAJSON"
)
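// AdaptFunc builds the querier and destroyer for the restore feature:
// a one-off job running the cockroach RESTORE command for the given
// backup name and timestamp.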
func AdaptFunc(
monitor mntr.Monitor,
backupName string,
namespace string,
componentLabels *labels.Component,
bucketName string,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
checkDBReady operator.EnsureFunc,
secretName string,
secretKey string,
dbURL string,
dbPort int32,
image string,
) (
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
err error,
) {
jobName := GetJobName(backupName)
command := getCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
)
jobdef := getJob(
namespace,
labels.MustForName(componentLabels, GetJobName(backupName)),
nodeselector,
tolerations,
secretName,
secretKey,
command,
image,
)
// namespace comes first, matching job.AdaptFuncToDestroy(namespace, name)
// as called from the backup package; the arguments were swapped here
destroyJ, err := job.AdaptFuncToDestroy(namespace, jobName)
if err != nil {
return nil, nil, err
}
destroyers := []operator.DestroyFunc{
operator.ResourceDestroyToZitadelDestroy(destroyJ),
}
queryJ, err := job.AdaptFuncToEnsure(jobdef)
if err != nil {
return nil, nil, err
}
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
return operator.QueriersToEnsureFunc(monitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(monitor, destroyers),
nil
}
func GetJobName(backupName string) string {
return jobPrefix + backupName + jobSuffix
}

View File

@@ -1,155 +0,0 @@
package restore
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestBackup_Adapt1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
namespace := "testNs"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
timestamp := "testTs"
backupName := "testName2"
bucketName := "testBucket2"
image := "testImage2"
secretKey := "testKey"
secretName := "testSecretName"
dbURL := "testDB"
dbPort := int32(80)
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "testVersion"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
nodeselector,
tolerations,
secretName,
secretKey,
getCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
),
image,
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
bucketName,
timestamp,
nodeselector,
tolerations,
checkDBReady,
secretName,
secretKey,
dbURL,
dbPort,
image,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_Adapt2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
namespace := "testNs2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
timestamp := "testTs"
backupName := "testName2"
bucketName := "testBucket2"
image := "testImage2"
secretKey := "testKey2"
secretName := "testSecretName2"
dbURL := "testDB"
dbPort := int32(80)
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent2")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
nodeselector,
tolerations,
secretName,
secretKey,
getCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
),
image,
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
bucketName,
timestamp,
nodeselector,
tolerations,
checkDBReady,
secretName,
secretKey,
dbURL,
dbPort,
image,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}

View File

@@ -1,28 +0,0 @@
package restore
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator"
)
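// GetCleanupFunc waits until the restore job has completed and then
// deletes the job so that a subsequent restore can be scheduled.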
func GetCleanupFunc(
monitor mntr.Monitor,
namespace,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for restore to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
return fmt.Errorf("error while waiting for restore to be completed: %w", err)
}
monitor.Info("restore is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
return fmt.Errorf("error while trying to cleanup restore: %w", err)
}
monitor.Info("restore cleanup is completed")
return nil
}
}

View File

@@ -1,40 +0,0 @@
package restore
import (
"errors"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Cleanup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test"
namespace := "testNs"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
func TestBackup_Cleanup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test2"
namespace := "testNs2"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@@ -1,36 +0,0 @@
package restore
import (
"strconv"
"strings"
)
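// getCommand assembles the shell command executed in the restore container:
// it exports the base64-encoded service account and runs cockroach sql with
// a RESTORE statement reading from the gs:// bucket.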
func getCommand(
timestamp string,
bucketName string,
backupName string,
certsFolder string,
serviceAccountPath string,
dbURL string,
dbPort int32,
) string {
backupCommands := make([]string, 0)
backupCommands = append(backupCommands, "export "+saJsonBase64Env+"=$(cat "+serviceAccountPath+" | base64 | tr -d '\n' )")
backupCommands = append(backupCommands,
strings.Join([]string{
"cockroach",
"sql",
"--certs-dir=" + certsFolder,
"--host=" + dbURL,
"--port=" + strconv.Itoa(int(dbPort)),
"-e",
"\"RESTORE FROM \\\"gs://" + bucketName + "/" + backupName + "/" + timestamp + "?AUTH=specified&CREDENTIALS=${" + saJsonBase64Env + "}\\\";\"",
}, " ",
),
)
return strings.Join(backupCommands, " && ")
}

View File

@@ -1,47 +0,0 @@
package restore
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Command1(t *testing.T) {
timestamp := "test1"
bucketName := "testBucket"
backupName := "testBackup"
dbURL := "testDB"
dbPort := int32(80)
cmd := getCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
)
equals := "export SAJSON=$(cat /secrets/sa.json | base64 | tr -d '\n' ) && cockroach sql --certs-dir=/cockroach/cockroach-certs --host=testDB --port=80 -e \"RESTORE FROM \\\"gs://testBucket/testBackup/test1?AUTH=specified&CREDENTIALS=${SAJSON}\\\";\""
assert.Equal(t, equals, cmd)
}
func TestBackup_Command2(t *testing.T) {
timestamp := "test2"
bucketName := "testBucket"
backupName := "testBackup"
dbURL := "testDB2"
dbPort := int32(81)
cmd := getCommand(
timestamp,
bucketName,
backupName,
certPath,
secretPath,
dbURL,
dbPort,
)
equals := "export SAJSON=$(cat /secrets/sa.json | base64 | tr -d '\n' ) && cockroach sql --certs-dir=/cockroach/cockroach-certs --host=testDB2 --port=81 -e \"RESTORE FROM \\\"gs://testBucket/testBackup/test2?AUTH=specified&CREDENTIALS=${SAJSON}\\\";\""
assert.Equal(t, equals, cmd)
}

View File

@@ -1,71 +0,0 @@
package restore
import (
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator/helpers"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
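// getJob defines the restore job: a single container mounting the client
// certificates and the service account secret, running the restore command.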
func getJob(
namespace string,
nameLabels *labels.Name,
nodeselector map[string]string,
tolerations []corev1.Toleration,
secretName string,
secretKey string,
command string,
image string,
) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: v1.ObjectMeta{
Name: nameLabels.Name(),
Namespace: namespace,
Labels: labels.MustK8sMap(nameLabels),
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{{
Name: nameLabels.Name(),
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: secretKey,
SubPath: secretKey,
MountPath: secretPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: secretKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
},
}
}

View File

@@ -1,164 +0,0 @@
package restore
import (
"testing"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator/helpers"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestBackup_Job1(t *testing.T) {
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
command := "test"
secretKey := "testKey"
secretName := "testSecretName"
jobName := "testJob"
namespace := "testNs"
image := "testImage"
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": jobName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testOpVersion",
"caos.ch/apiversion": "testVersion",
"caos.ch/kind": "testKind"}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testOpVersion"), "testKind", "testVersion"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
equals :=
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: namespace,
Labels: k8sLabels,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: jobName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: secretKey,
SubPath: secretKey,
MountPath: secretPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: secretKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
},
}
assert.Equal(t, equals, getJob(namespace, nameLabels, nodeselector, tolerations, secretName, secretKey, command, image))
}
func TestBackup_Job2(t *testing.T) {
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
command := "test2"
secretKey := "testKey2"
secretName := "testSecretName2"
jobName := "testJob2"
namespace := "testNs2"
testImage := "testImage2"
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent2",
"app.kubernetes.io/managed-by": "testOp2",
"app.kubernetes.io/name": jobName,
"app.kubernetes.io/part-of": "testProd2",
"app.kubernetes.io/version": "testOpVersion2",
"caos.ch/apiversion": "testVersion2",
"caos.ch/kind": "testKind2"}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testOpVersion2"), "testKind2", "testVersion2"), "testComponent2")
nameLabels := labels.MustForName(componentLabels, jobName)
equals :=
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: namespace,
Labels: k8sLabels,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: jobName,
Image: testImage,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: secretKey,
SubPath: secretKey,
MountPath: secretPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: secretKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
},
},
}},
},
},
},
}
assert.Equal(t, equals, getJob(namespace, nameLabels, nodeselector, tolerations, secretName, secretKey, command, testImage))
}

View File

@@ -1,30 +0,0 @@
package bucket
import (
"github.com/caos/orbos/pkg/secret"
)
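// getSecretsMap normalizes the desired spec (filling nil fields with empty
// values) and returns the configurable and existing secret maps, both keyed
// by "serviceaccountjson".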
func getSecretsMap(desiredKind *DesiredV0) (map[string]*secret.Secret, map[string]*secret.Existing) {
var (
secrets = make(map[string]*secret.Secret)
existing = make(map[string]*secret.Existing)
)
if desiredKind.Spec == nil {
desiredKind.Spec = &Spec{}
}
if desiredKind.Spec.ServiceAccountJSON == nil {
desiredKind.Spec.ServiceAccountJSON = &secret.Secret{}
}
if desiredKind.Spec.ExistingServiceAccountJSON == nil {
desiredKind.Spec.ExistingServiceAccountJSON = &secret.Existing{}
}
sakey := "serviceaccountjson"
secrets[sakey] = desiredKind.Spec.ServiceAccountJSON
existing[sakey] = desiredKind.Spec.ExistingServiceAccountJSON
return secrets, existing
}

View File

@@ -1,26 +0,0 @@
package bucket
import (
"testing"
"github.com/caos/orbos/pkg/secret"
"github.com/stretchr/testify/assert"
)
func TestBucket_getSecretsFull(t *testing.T) {
secrets, existing := getSecretsMap(&desired)
assert.Equal(t, desired.Spec.ServiceAccountJSON, secrets["serviceaccountjson"])
assert.Equal(t, desired.Spec.ExistingServiceAccountJSON, existing["serviceaccountjson"])
}
func TestBucket_getSecretsEmpty(t *testing.T) {
secrets, existing := getSecretsMap(&desiredWithoutSecret)
assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
}
func TestBucket_getSecretsNil(t *testing.T) {
secrets, existing := getSecretsMap(&desiredNil)
assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
}

View File

@@ -1,9 +0,0 @@
package core
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
)
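// BackupListFunc lists the backups available for the given name, resolved
// from the desired state tree.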
type BackupListFunc func(monitor mntr.Monitor, k8sClient kubernetes.ClientInt, name string, desired *tree.Tree) ([]string, error)

View File

@@ -1,296 +0,0 @@
package s3
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/secret"
"github.com/caos/orbos/pkg/labels"
secretpkg "github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/secret/read"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/common"
"github.com/caos/zitadel/operator/database/kinds/backups/s3/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/s3/restore"
corev1 "k8s.io/api/core/v1"
)
const (
accessKeyIDName = "backup-accessaccountkey"
accessKeyIDKey = "accessaccountkey"
secretAccessKeyName = "backup-secretaccesskey"
secretAccessKeyKey = "secretaccesskey"
sessionTokenName = "backup-sessiontoken"
sessionTokenKey = "sessiontoken"
)
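// AdaptFunc adapts the S3 backup kind: it parses the desired state, ensures
// the credential secrets (access key ID, secret access key, session token)
// and returns query and destroy funcs for the requested backup and restore
// features.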
func AdaptFunc(
name string,
namespace string,
componentLabels *labels.Component,
checkDBReady operator.EnsureFunc,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
version string,
dbURL string,
dbPort int32,
features []string,
customImageRegistry string,
) operator.AdaptFunc {
return func(
monitor mntr.Monitor,
desired *tree.Tree,
current *tree.Tree,
) (
operator.QueryFunc,
operator.DestroyFunc,
operator.ConfigureFunc,
map[string]*secretpkg.Secret,
map[string]*secretpkg.Existing,
bool,
error,
) {
internalMonitor := monitor.WithField("component", "backup")
desiredKind, err := ParseDesiredV0(desired)
if err != nil {
return nil, nil, nil, nil, nil, false, fmt.Errorf("parsing desired state failed: %w", err)
}
desired.Parsed = desiredKind
secrets, existing := getSecretsMap(desiredKind)
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
destroySAKI, err := secret.AdaptFuncToDestroy(namespace, accessKeyIDName)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
destroySSAK, err := secret.AdaptFuncToDestroy(namespace, secretAccessKeyName)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
destroySSTK, err := secret.AdaptFuncToDestroy(namespace, sessionTokenName)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
image := common.BackupImage.Reference(customImageRegistry, version)
_, destroyB, err := backup.AdaptFunc(
internalMonitor,
name,
namespace,
componentLabels,
checkDBReady,
desiredKind.Spec.Bucket,
desiredKind.Spec.Cron,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
desiredKind.Spec.Region,
desiredKind.Spec.Endpoint,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
_, destroyR, err := restore.AdaptFunc(
monitor,
name,
namespace,
componentLabels,
desiredKind.Spec.Bucket,
timestamp,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
desiredKind.Spec.Region,
desiredKind.Spec.Endpoint,
nodeselector,
tolerations,
checkDBReady,
dbURL,
dbPort,
image,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
destroyers := make([]operator.DestroyFunc, 0)
for _, feature := range features {
switch feature {
case backup.Normal, backup.Instant:
destroyers = append(destroyers,
operator.ResourceDestroyToZitadelDestroy(destroySSAK),
operator.ResourceDestroyToZitadelDestroy(destroySAKI),
operator.ResourceDestroyToZitadelDestroy(destroySSTK),
destroyB,
)
case restore.Instant:
destroyers = append(destroyers,
destroyR,
)
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
if err := desiredKind.validateSecrets(); err != nil {
return nil, err
}
valueAKI, err := read.GetSecretValue(k8sClient, desiredKind.Spec.AccessKeyID, desiredKind.Spec.ExistingAccessKeyID)
if err != nil {
return nil, err
}
querySAKI, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, accessKeyIDName), map[string]string{accessKeyIDKey: valueAKI})
if err != nil {
return nil, err
}
valueSAK, err := read.GetSecretValue(k8sClient, desiredKind.Spec.SecretAccessKey, desiredKind.Spec.ExistingSecretAccessKey)
if err != nil {
return nil, err
}
querySSAK, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretAccessKeyName), map[string]string{secretAccessKeyKey: valueSAK})
if err != nil {
return nil, err
}
valueST, err := read.GetSecretValue(k8sClient, desiredKind.Spec.SessionToken, desiredKind.Spec.ExistingSessionToken)
if err != nil {
return nil, err
}
querySST, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, sessionTokenName), map[string]string{sessionTokenKey: valueST})
if err != nil {
return nil, err
}
queryB, _, err := backup.AdaptFunc(
internalMonitor,
name,
namespace,
componentLabels,
checkDBReady,
desiredKind.Spec.Bucket,
desiredKind.Spec.Cron,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
desiredKind.Spec.Region,
desiredKind.Spec.Endpoint,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
image,
)
if err != nil {
return nil, err
}
queryR, _, err := restore.AdaptFunc(
monitor,
name,
namespace,
componentLabels,
desiredKind.Spec.Bucket,
timestamp,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
desiredKind.Spec.Region,
desiredKind.Spec.Endpoint,
nodeselector,
tolerations,
checkDBReady,
dbURL,
dbPort,
image,
)
if err != nil {
return nil, err
}
queriers := make([]operator.QueryFunc, 0)
cleanupQueries := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case backup.Normal:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(querySAKI),
operator.ResourceQueryToZitadelQuery(querySSAK),
operator.ResourceQueryToZitadelQuery(querySST),
queryB,
)
case backup.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(querySAKI),
operator.ResourceQueryToZitadelQuery(querySSAK),
operator.ResourceQueryToZitadelQuery(querySST),
queryB,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(backup.GetCleanupFunc(monitor, namespace, name)),
)
case restore.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(querySAKI),
operator.ResourceQueryToZitadelQuery(querySSAK),
operator.ResourceQueryToZitadelQuery(querySST),
queryR,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(restore.GetCleanupFunc(monitor, namespace, name)),
)
}
}
queriers = append(queriers, cleanupQueries...)
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
func(kubernetes.ClientInt, map[string]interface{}, bool) error { return nil },
secrets,
existing,
false,
nil
}
}

View File

@@ -1,500 +0,0 @@
package s3
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
func TestBucket_Secrets(t *testing.T) {
masterkey := "testMk"
features := []string{backup.Normal}
region := "testRegion"
endpoint := "testEndpoint"
akid := "testAKID"
sak := "testSAK"
st := "testST"
bucketName := "testBucket2"
cron := "testCron2"
monitor := mntr.Monitor{}
namespace := "testNs2"
dbURL := "testDB"
dbPort := int32(80)
kindVersion := "v0"
kind := "BucketBackup"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", kindVersion), "testComponent")
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/"+kind, kindVersion, false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
Endpoint: endpoint,
Region: region,
AccessKeyID: &secret.Secret{
Value: akid,
},
SecretAccessKey: &secret.Secret{
Value: sak,
},
SessionToken: &secret.Secret{
Value: st,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
allSecrets := map[string]string{
"accesskeyid": "testAKID",
"secretaccesskey": "testSAK",
"sessiontoken": "testST",
}
_, _, _, secrets, existing, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
for key, value := range allSecrets {
assert.Contains(t, secrets, key)
assert.Contains(t, existing, key)
assert.Equal(t, value, secrets[key].Value)
}
}
func TestBucket_AdaptBackup(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{backup.Normal}
region := "testRegion"
endpoint := "testEndpoint"
akid := "testAKID"
sak := "testSAK"
st := "testST"
dbURL := "testDB"
dbPort := int32(80)
bucketName := "testBucket2"
cron := "testCron2"
monitor := mntr.Monitor{}
namespace := "testNs2"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabelsAKID := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": accessKeyIDName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
k8sLabelsSAK := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": secretAccessKeyName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
k8sLabelsST := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": sessionTokenName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
Endpoint: endpoint,
Region: region,
AccessKeyID: &secret.Secret{
Value: akid,
},
SecretAccessKey: &secret.Secret{
Value: sak,
},
SessionToken: &secret.Secret{
Value: st,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetBackup(client, namespace, k8sLabelsAKID, k8sLabelsSAK, k8sLabelsST, akid, sak, st)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, []string{})
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(client))
}
func TestBucket_AdaptInstantBackup(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{backup.Instant}
bucketName := "testBucket1"
cron := "testCron"
monitor := mntr.Monitor{}
namespace := "testNs"
dbURL := "testDB"
dbPort := int32(80)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabelsAKID := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": accessKeyIDName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
k8sLabelsSAK := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": secretAccessKeyName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
k8sLabelsST := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": sessionTokenName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
region := "testRegion"
endpoint := "testEndpoint"
akid := "testAKID"
sak := "testSAK"
st := "testST"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
Endpoint: endpoint,
Region: region,
AccessKeyID: &secret.Secret{
Value: akid,
},
SecretAccessKey: &secret.Secret{
Value: sak,
},
SessionToken: &secret.Secret{
Value: st,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetInstantBackup(client, namespace, backupName, k8sLabelsAKID, k8sLabelsSAK, k8sLabelsST, akid, sak, st)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, []string{})
ensure, err := query(client, queried)
assert.NotNil(t, ensure)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBucket_AdaptRestore(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{restore.Instant}
bucketName := "testBucket1"
cron := "testCron"
monitor := mntr.Monitor{}
namespace := "testNs"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabelsAKID := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": accessKeyIDName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
k8sLabelsSAK := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": secretAccessKeyName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
k8sLabelsST := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": sessionTokenName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
region := "testRegion"
endpoint := "testEndpoint"
akid := "testAKID"
sak := "testSAK"
st := "testST"
dbURL := "testDB"
dbPort := int32(80)
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
Endpoint: endpoint,
Region: region,
AccessKeyID: &secret.Secret{
Value: akid,
},
SecretAccessKey: &secret.Secret{
Value: sak,
},
SessionToken: &secret.Secret{
Value: st,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetRestore(client, namespace, backupName, k8sLabelsAKID, k8sLabelsSAK, k8sLabelsST, akid, sak, st)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
"",
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, []string{})
ensure, err := query(client, queried)
assert.NotNil(t, ensure)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
/*
func TestBucket_AdaptClean(t *testing.T) {
masterkey := "testMk"
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{clean.Instant}
bucketName := "testBucket1"
cron := "testCron"
monitor := mntr.Monitor{}
namespace := "testNs"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
saJson := "testSA"
dbURL := "testDB"
dbPort := int32(80)
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: &tree.Common{
Kind: "databases.caos.ch/BucketBackup",
Version: "v0",
},
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
SetClean(client, namespace, backupName, k8sLabels, saJson)
query, _, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
checkDBReady,
timestamp,
nodeselector,
tolerations,
version,
dbURL,
dbPort,
features,
)(
monitor,
desired,
&tree.Tree{},
)
assert.NoError(t, err)
databases := []string{"test1", "test2"}
users := []string{"test1", "test2"}
queried := SetQueriedForDatabases(databases, users)
ensure, err := query(client, queried)
assert.NotNil(t, ensure)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}*/

View File

@@ -1,154 +0,0 @@
package backup
import (
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/cronjob"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
"github.com/caos/orbos/pkg/labels"
corev1 "k8s.io/api/core/v1"
)
const (
defaultMode int32 = 256
certPath = "/cockroach/cockroach-certs"
accessKeyIDPath = "/secrets/accessaccountkey"
secretAccessKeyPath = "/secrets/secretaccesskey"
sessionTokenPath = "/secrets/sessiontoken"
backupNameEnv = "BACKUP_NAME"
cronJobNamePrefix = "backup-"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout = 15 * time.Minute
Normal = "backup"
Instant = "instantbackup"
)
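// AdaptFunc builds the backup command and job spec, then returns query and
// destroy funcs per feature: Normal manages a CronJob on the given schedule,
// Instant a one-off Job; both ensure the database is ready before applying.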
func AdaptFunc(
monitor mntr.Monitor,
backupName string,
namespace string,
componentLabels *labels.Component,
checkDBReady operator.EnsureFunc,
bucketName string,
cron string,
accessKeyIDName string,
accessKeyIDKey string,
secretAccessKeyName string,
secretAccessKeyKey string,
sessionTokenName string,
sessionTokenKey string,
region string,
endpoint string,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
dbURL string,
dbPort int32,
features []string,
image string,
) (
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
err error,
) {
command := getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
)
jobSpecDef := getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
image,
command,
)
destroyers := []operator.DestroyFunc{}
queriers := []operator.QueryFunc{}
cronJobDef := getCronJob(
namespace,
labels.MustForName(componentLabels, GetJobName(backupName)),
cron,
jobSpecDef,
)
destroyCJ, err := cronjob.AdaptFuncToDestroy(cronJobDef.Namespace, cronJobDef.Name)
if err != nil {
return nil, nil, err
}
queryCJ, err := cronjob.AdaptFuncToEnsure(cronJobDef)
if err != nil {
return nil, nil, err
}
jobDef := getJob(
namespace,
labels.MustForName(componentLabels, cronJobNamePrefix+backupName),
jobSpecDef,
)
destroyJ, err := job.AdaptFuncToDestroy(jobDef.Namespace, jobDef.Name)
if err != nil {
return nil, nil, err
}
queryJ, err := job.AdaptFuncToEnsure(jobDef)
if err != nil {
return nil, nil, err
}
for _, feature := range features {
switch feature {
case Normal:
destroyers = append(destroyers,
operator.ResourceDestroyToZitadelDestroy(destroyCJ),
)
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryCJ),
)
case Instant:
destroyers = append(destroyers,
operator.ResourceDestroyToZitadelDestroy(destroyJ),
)
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
)
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
return operator.QueriersToEnsureFunc(monitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(monitor, destroyers),
nil
}
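// GetJobName returns the name used for both the backup CronJob and the
// instant backup Job, prefixing the backup name.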
func GetJobName(backupName string) string {
return cronJobNamePrefix + backupName
}

View File

@@ -1,404 +0,0 @@
package backup
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestBackup_AdaptInstantBackup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Instant}
monitor := mntr.Monitor{}
namespace := "testNs"
bucketName := "testBucket"
cron := "testCron"
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
accessKeyIDName := "testAKIN"
accessKeyIDKey := "testAKIK"
secretAccessKeyName := "testSAKN"
secretAccessKeyKey := "testSAKK"
sessionTokenName := "testSTN"
sessionTokenKey := "testSTK"
region := "region"
endpoint := "endpoint"
dbURL := "testDB"
dbPort := int32(80)
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
version,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
),
),
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
region,
endpoint,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
version,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_AdaptInstantBackup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Instant}
monitor := mntr.Monitor{}
namespace := "testNs2"
dbURL := "testDB"
dbPort := int32(80)
bucketName := "testBucket2"
cron := "testCron2"
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
accessKeyIDName := "testAKIN2"
accessKeyIDKey := "testAKIK2"
secretAccessKeyName := "testSAKN2"
secretAccessKeyKey := "testSAKK2"
sessionTokenName := "testSTN2"
sessionTokenKey := "testSTK2"
region := "region2"
endpoint := "endpoint2"
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
version,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
),
),
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
region,
endpoint,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
version,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_AdaptBackup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Normal}
monitor := mntr.Monitor{}
namespace := "testNs"
bucketName := "testBucket"
cron := "testCron"
timestamp := "test"
dbURL := "testDB"
dbPort := int32(80)
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
accessKeyIDName := "testAKIN"
accessKeyIDKey := "testAKIK"
secretAccessKeyName := "testSAKN"
secretAccessKeyKey := "testSAKK"
sessionTokenName := "testSTN"
sessionTokenKey := "testSTK"
region := "region"
endpoint := "endpoint"
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getCronJob(
namespace,
nameLabels,
cron,
getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
version,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
),
),
)
client.EXPECT().ApplyCronJob(jobDef).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
region,
endpoint,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
version,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_AdaptBackup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
features := []string{Normal}
monitor := mntr.Monitor{}
namespace := "testNs2"
dbURL := "testDB"
dbPort := int32(80)
bucketName := "testBucket2"
cron := "testCron2"
timestamp := "test2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
accessKeyIDName := "testAKIN2"
accessKeyIDKey := "testAKIK2"
secretAccessKeyName := "testSAKN2"
secretAccessKeyKey := "testSAKK2"
sessionTokenName := "testSTN2"
sessionTokenKey := "testSTK2"
region := "region2"
endpoint := "endpoint2"
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getCronJob(
namespace,
nameLabels,
cron,
getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
version,
getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
),
),
)
client.EXPECT().ApplyCronJob(jobDef).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
checkDBReady,
bucketName,
cron,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
region,
endpoint,
timestamp,
nodeselector,
tolerations,
dbURL,
dbPort,
features,
version,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}

View File

@@ -1,27 +0,0 @@
package backup
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator"
)
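// GetCleanupFunc returns an EnsureFunc that waits until the backup job has
// completed and then deletes it.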
func GetCleanupFunc(
monitor mntr.Monitor,
namespace string,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for backup to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
return fmt.Errorf("error while waiting for backup to be completed: %w", err)
}
monitor.Info("backup is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
return fmt.Errorf("error while trying to cleanup backup: %w", err)
}
monitor.Info("cleanup backup is completed")
return nil
}
}

View File

@@ -1,41 +0,0 @@
package backup
import (
"fmt"
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestBackup_Cleanup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test"
namespace := "testNs"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(fmt.Errorf("fail"))
assert.Error(t, cleanupFunc(client))
}
func TestBackup_Cleanup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test2"
namespace := "testNs2"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(fmt.Errorf("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@@ -1,53 +0,0 @@
package backup
import (
"strconv"
"strings"
)
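// getBackupCommand builds the shell command for the backup container: it
// exports BACKUP_NAME (the given timestamp, or the current time if empty)
// and runs a cockroach sql BACKUP statement targeting the S3 bucket, passing
// the AWS credentials read from the mounted secret files as URL parameters;
// the region parameter is only appended when set.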
func getBackupCommand(
timestamp string,
bucketName string,
backupName string,
certsFolder string,
accessKeyIDPath string,
secretAccessKeyPath string,
sessionTokenPath string,
region string,
endpoint string,
dbURL string,
dbPort int32,
) string {
backupCommands := make([]string, 0)
if timestamp != "" {
backupCommands = append(backupCommands, "export "+backupNameEnv+"="+timestamp)
} else {
backupCommands = append(backupCommands, "export "+backupNameEnv+"=$(date +%Y-%m-%dT%H:%M:%SZ)")
}
parameters := []string{
"AWS_ACCESS_KEY_ID=$(cat " + accessKeyIDPath + ")",
"AWS_SECRET_ACCESS_KEY=$(cat " + secretAccessKeyPath + ")",
"AWS_SESSION_TOKEN=$(cat " + sessionTokenPath + ")",
"AWS_ENDPOINT=" + endpoint,
}
if region != "" {
parameters = append(parameters, "AWS_REGION="+region)
}
backupCommands = append(backupCommands,
strings.Join([]string{
"cockroach",
"sql",
"--certs-dir=" + certsFolder,
"--host=" + dbURL,
"--port=" + strconv.Itoa(int(dbPort)),
"-e",
"\"BACKUP TO \\\"s3://" + bucketName + "/" + backupName + "/${" + backupNameEnv + "}?" + strings.Join(parameters, "&") + "\\\";\"",
}, " ",
),
)
return strings.Join(backupCommands, " && ")
}

View File

@@ -1,58 +0,0 @@
package backup
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Command1(t *testing.T) {
timestamp := ""
bucketName := "test"
backupName := "test"
dbURL := "testDB"
dbPort := int32(80)
region := "region"
endpoint := "endpoint"
cmd := getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
)
equals := "export " + backupNameEnv + "=$(date +%Y-%m-%dT%H:%M:%SZ) && cockroach sql --certs-dir=" + certPath + " --host=testDB --port=80 -e \"BACKUP TO \\\"s3://test/test/${BACKUP_NAME}?AWS_ACCESS_KEY_ID=$(cat " + accessKeyIDPath + ")&AWS_SECRET_ACCESS_KEY=$(cat " + secretAccessKeyPath + ")&AWS_SESSION_TOKEN=$(cat " + sessionTokenPath + ")&AWS_ENDPOINT=endpoint&AWS_REGION=region\\\";\""
assert.Equal(t, equals, cmd)
}
func TestBackup_Command2(t *testing.T) {
timestamp := "test"
bucketName := "test"
backupName := "test"
dbURL := "testDB"
dbPort := int32(80)
region := "region"
endpoint := "endpoint"
cmd := getBackupCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
)
equals := "export " + backupNameEnv + "=test && cockroach sql --certs-dir=" + certPath + " --host=testDB --port=80 -e \"BACKUP TO \\\"s3://test/test/${BACKUP_NAME}?AWS_ACCESS_KEY_ID=$(cat " + accessKeyIDPath + ")&AWS_SECRET_ACCESS_KEY=$(cat " + secretAccessKeyPath + ")&AWS_SESSION_TOKEN=$(cat " + sessionTokenPath + ")&AWS_ENDPOINT=endpoint&AWS_REGION=region\\\";\""
assert.Equal(t, equals, cmd)
}

View File

@@ -1,127 +0,0 @@
package backup
import (
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator/helpers"
batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/batch/v1beta1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
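// getCronJob wraps the given job spec in a CronJob with the given schedule,
// forbidding concurrent runs.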
func getCronJob(
namespace string,
nameLabels *labels.Name,
cron string,
jobSpecDef batchv1.JobSpec,
) *v1beta1.CronJob {
return &v1beta1.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: nameLabels.Name(),
Namespace: namespace,
Labels: labels.MustK8sMap(nameLabels),
},
Spec: v1beta1.CronJobSpec{
Schedule: cron,
ConcurrencyPolicy: v1beta1.ForbidConcurrent,
JobTemplate: v1beta1.JobTemplateSpec{
Spec: jobSpecDef,
},
},
}
}
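// getJob wraps the given job spec in a plain Job for one-off executions.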
func getJob(
namespace string,
nameLabels *labels.Name,
jobSpecDef batchv1.JobSpec,
) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: v1.ObjectMeta{
Name: nameLabels.Name(),
Namespace: namespace,
Labels: labels.MustK8sMap(nameLabels),
},
Spec: jobSpecDef,
}
}
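// getJobSpecDef defines the pod template shared by the Job and the CronJob:
// a single container running the backup command, with the client certificates
// and the three AWS credential secrets mounted as files.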
func getJobSpecDef(
nodeselector map[string]string,
tolerations []corev1.Toleration,
accessKeyIDName string,
accessKeyIDKey string,
secretAccessKeyName string,
secretAccessKeyKey string,
sessionTokenName string,
sessionTokenKey string,
backupName string,
image string,
command string,
) batchv1.JobSpec {
return batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: backupName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: accessKeyIDKey,
SubPath: accessKeyIDKey,
MountPath: accessKeyIDPath,
}, {
Name: secretAccessKeyKey,
SubPath: secretAccessKeyKey,
MountPath: secretAccessKeyPath,
}, {
Name: sessionTokenKey,
SubPath: sessionTokenKey,
MountPath: sessionTokenPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: accessKeyIDKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: accessKeyIDName,
},
},
}, {
Name: secretAccessKeyKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretAccessKeyName,
},
},
}, {
Name: sessionTokenKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: sessionTokenName,
},
},
}},
},
},
}
}

View File

@@ -1,199 +0,0 @@
package backup
import (
"github.com/caos/zitadel/operator/common"
"github.com/caos/zitadel/operator/helpers"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
"testing"
)
func TestBackup_JobSpec1(t *testing.T) {
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
backupName := "testName"
version := "testVersion"
command := "test"
accessKeyIDName := "testAKIN"
accessKeyIDKey := "testAKIK"
secretAccessKeyName := "testSAKN"
secretAccessKeyKey := "testSAKK"
sessionTokenName := "testSTN"
sessionTokenKey := "testSTK"
equals := batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: backupName,
Image: common.BackupImage.Reference("", version),
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: accessKeyIDKey,
SubPath: accessKeyIDKey,
MountPath: accessKeyIDPath,
}, {
Name: secretAccessKeyKey,
SubPath: secretAccessKeyKey,
MountPath: secretAccessKeyPath,
}, {
Name: sessionTokenKey,
SubPath: sessionTokenKey,
MountPath: sessionTokenPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: accessKeyIDKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: accessKeyIDName,
},
},
}, {
Name: secretAccessKeyKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretAccessKeyName,
},
},
}, {
Name: sessionTokenKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: sessionTokenName,
},
},
}},
},
},
}
assert.Equal(t, equals, getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
common.BackupImage.Reference("", version),
command))
}
func TestBackup_JobSpec2(t *testing.T) {
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
backupName := "testName2"
version := "testVersion2"
command := "test2"
accessKeyIDName := "testAKIN2"
accessKeyIDKey := "testAKIK2"
secretAccessKeyName := "testSAKN2"
secretAccessKeyKey := "testSAKK2"
sessionTokenName := "testSTN2"
sessionTokenKey := "testSTK2"
equals := batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: backupName,
Image: common.BackupImage.Reference("", version),
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: accessKeyIDKey,
SubPath: accessKeyIDKey,
MountPath: accessKeyIDPath,
}, {
Name: secretAccessKeyKey,
SubPath: secretAccessKeyKey,
MountPath: secretAccessKeyPath,
}, {
Name: sessionTokenKey,
SubPath: sessionTokenKey,
MountPath: sessionTokenPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: accessKeyIDKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: accessKeyIDName,
},
},
}, {
Name: secretAccessKeyKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretAccessKeyName,
},
},
}, {
Name: sessionTokenKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: sessionTokenName,
},
},
}},
},
},
}
assert.Equal(t, equals, getJobSpecDef(
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
backupName,
common.BackupImage.Reference("", version),
command,
))
}

View File

@@ -1,65 +0,0 @@
package s3
import (
"fmt"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
)
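// DesiredV0 is the v0 desired state for the S3 backup kind; Spec carries the
// bucket coordinates and the AWS credentials, either inline or as references
// to existing secrets.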
type DesiredV0 struct {
Common *tree.Common `yaml:",inline"`
Spec *Spec
}
type Spec struct {
Verbose bool
Cron string `yaml:"cron,omitempty"`
Bucket string `yaml:"bucket,omitempty"`
Endpoint string `yaml:"endpoint,omitempty"`
Region string `yaml:"region,omitempty"`
AccessKeyID *secret.Secret `yaml:"accessKeyID,omitempty"`
ExistingAccessKeyID *secret.Existing `yaml:"existingAccessKeyID,omitempty"`
SecretAccessKey *secret.Secret `yaml:"secretAccessKey,omitempty"`
ExistingSecretAccessKey *secret.Existing `yaml:"existingSecretAccessKey,omitempty"`
SessionToken *secret.Secret `yaml:"sessionToken,omitempty"`
ExistingSessionToken *secret.Existing `yaml:"existingSessionToken,omitempty"`
}
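// IsZero reports whether the spec carries no configuration at all, i.e. no
// secret is set and all scalar fields are empty or false.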
func (s *Spec) IsZero() bool {
if ((s.AccessKeyID == nil || s.AccessKeyID.IsZero()) && (s.ExistingAccessKeyID == nil || s.ExistingAccessKeyID.IsZero())) &&
((s.SecretAccessKey == nil || s.SecretAccessKey.IsZero()) && (s.ExistingSecretAccessKey == nil || s.ExistingSecretAccessKey.IsZero())) &&
((s.SessionToken == nil || s.SessionToken.IsZero()) && (s.ExistingSessionToken == nil || s.ExistingSessionToken.IsZero())) &&
!s.Verbose &&
s.Bucket == "" &&
s.Cron == "" &&
s.Endpoint == "" &&
s.Region == "" {
return true
}
return false
}
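// ParseDesiredV0 decodes the desired tree's original YAML into a DesiredV0;
// the spec is pre-initialized so a spec-less document yields an empty spec
// rather than nil.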
func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
desiredKind := &DesiredV0{
Common: desiredTree.Common,
Spec: &Spec{},
}
if err := desiredTree.Original.Decode(desiredKind); err != nil {
return nil, fmt.Errorf("parsing desired state failed: %w", err)
}
return desiredKind, nil
}
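// validateSecrets checks that the access key ID and the secret access key are
// each provided either inline or as an existing secret reference; the session
// token is not validated.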
func (d *DesiredV0) validateSecrets() error {
if err := secret.ValidateSecret(d.Spec.AccessKeyID, d.Spec.ExistingAccessKeyID); err != nil {
return fmt.Errorf("validating access key id failed: %w", err)
}
if err := secret.ValidateSecret(d.Spec.SecretAccessKey, d.Spec.ExistingSecretAccessKey); err != nil {
return fmt.Errorf("validating secret access key failed: %w", err)
}
return nil
}

View File

@@ -1,155 +0,0 @@
package s3
import (
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v3"
"testing"
)
const (
masterkey = "testMk"
cron = "testCron"
bucketName = "testBucket"
region = "testRegion"
endpoint = "testEndpoint"
akid = "testAKID"
sak = "testSAK"
st = "testST"
yamlFile = `kind: databases.caos.ch/BucketBackup
version: v0
spec:
verbose: true
cron: testCron
bucket: testBucket
region: testRegion
endpoint: testEndpoint
accessKeyID:
encryption: AES256
encoding: Base64
value: l7GEXvmCT8hBXereT4FIG4j5vKQIycjS
secretAccessKey:
encryption: AES256
encoding: Base64
value: NWYnOpFpME-9FESqWi0bFQ3M6e0iNQw=
sessionToken:
encryption: AES256
encoding: Base64
value: xVY9pEXuh0Wbf2P2X_yThXwqRX08sA==
`
yamlFileWithoutSecret = `kind: databases.caos.ch/BucketBackup
version: v0
spec:
verbose: true
cron: testCron
bucket: testBucket
endpoint: testEndpoint
region: testRegion
`
yamlEmpty = `kind: databases.caos.ch/BucketBackup
version: v0`
)
var (
desired = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
Endpoint: endpoint,
Region: region,
AccessKeyID: &secret.Secret{
Value: akid,
Encryption: "AES256",
Encoding: "Base64",
},
SecretAccessKey: &secret.Secret{
Value: sak,
Encryption: "AES256",
Encoding: "Base64",
},
SessionToken: &secret.Secret{
Value: st,
Encryption: "AES256",
Encoding: "Base64",
},
},
}
desiredWithoutSecret = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: true,
Cron: cron,
Bucket: bucketName,
Region: region,
Endpoint: endpoint,
},
}
desiredEmpty = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &Spec{
Verbose: false,
Cron: "",
Bucket: "",
Endpoint: "",
Region: "",
AccessKeyID: &secret.Secret{
Value: "",
},
SecretAccessKey: &secret.Secret{
Value: "",
},
SessionToken: &secret.Secret{
Value: "",
},
},
}
desiredNil = DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
}
)
func marshalYaml(t *testing.T, masterkey string, struc *DesiredV0) []byte {
secret.Masterkey = masterkey
data, err := yaml.Marshal(struc)
assert.NoError(t, err)
return data
}
func unmarshalYaml(t *testing.T, masterkey string, yamlFile []byte) *tree.Tree {
secret.Masterkey = masterkey
desiredTree := &tree.Tree{}
assert.NoError(t, yaml.Unmarshal(yamlFile, desiredTree))
return desiredTree
}
func getDesiredTree(t *testing.T, masterkey string, desired *DesiredV0) *tree.Tree {
return unmarshalYaml(t, masterkey, marshalYaml(t, masterkey, desired))
}
func TestBucket_DesiredParse(t *testing.T) {
assert.Equal(t, yamlFileWithoutSecret, string(marshalYaml(t, masterkey, &desiredWithoutSecret)))
desiredTree := unmarshalYaml(t, masterkey, []byte(yamlFile))
desiredKind, err := ParseDesiredV0(desiredTree)
assert.NoError(t, err)
assert.Equal(t, &desired, desiredKind)
}
func TestBucket_DesiredNotZero(t *testing.T) {
desiredTree := unmarshalYaml(t, masterkey, []byte(yamlFile))
desiredKind, err := ParseDesiredV0(desiredTree)
assert.NoError(t, err)
assert.False(t, desiredKind.Spec.IsZero())
}
func TestBucket_DesiredZero(t *testing.T) {
desiredTree := unmarshalYaml(t, masterkey, []byte(yamlEmpty))
desiredKind, err := ParseDesiredV0(desiredTree)
assert.NoError(t, err)
assert.True(t, desiredKind.Spec.IsZero())
}

View File

@@ -1,105 +0,0 @@
package s3
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/secret/read"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/backups/core"
"strings"
)
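// BackupList returns a BackupListFunc that reads the S3 credentials from the
// desired state and lists the backups stored under the given name.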
func BackupList() core.BackupListFunc {
return func(monitor mntr.Monitor, k8sClient kubernetes.ClientInt, name string, desired *tree.Tree) ([]string, error) {
desiredKind, err := ParseDesiredV0(desired)
if err != nil {
return nil, fmt.Errorf("parsing desired state failed: %w", err)
}
desired.Parsed = desiredKind
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
monitor.Verbose()
}
valueAKI, err := read.GetSecretValue(k8sClient, desiredKind.Spec.AccessKeyID, desiredKind.Spec.ExistingAccessKeyID)
if err != nil {
return nil, err
}
valueSAK, err := read.GetSecretValue(k8sClient, desiredKind.Spec.SecretAccessKey, desiredKind.Spec.ExistingSecretAccessKey)
if err != nil {
return nil, err
}
valueST, err := read.GetSecretValue(k8sClient, desiredKind.Spec.SessionToken, desiredKind.Spec.ExistingSessionToken)
if err != nil {
return nil, err
}
return listFilesWithFilter(valueAKI, valueSAK, valueST, desiredKind.Spec.Region, desiredKind.Spec.Endpoint, desiredKind.Spec.Bucket, name)
}
}
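// listFilesWithFilter pages through the bucket objects under the "<name>/"
// prefix and collects the distinct second path segments, i.e. the backup
// timestamps.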
func listFilesWithFilter(akid, secretkey, sessionToken string, region string, endpoint string, bucketName, name string) ([]string, error) {
customResolver := aws.EndpointResolverFunc(func(service, region string) (aws.Endpoint, error) {
return aws.Endpoint{
URL: "https://" + endpoint,
SigningRegion: region,
}, nil
})
cfg, err := config.LoadDefaultConfig(
context.Background(),
config.WithRegion(region),
config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(akid, secretkey, sessionToken)),
config.WithEndpointResolver(customResolver),
)
if err != nil {
return nil, err
}
client := s3.NewFromConfig(cfg)
prefix := name + "/"
params := &s3.ListObjectsV2Input{
Bucket: aws.String(bucketName),
Prefix: aws.String(prefix),
}
paginator := s3.NewListObjectsV2Paginator(client, params, func(o *s3.ListObjectsV2PaginatorOptions) {
o.Limit = 10
})
names := make([]string, 0)
for paginator.HasMorePages() {
output, err := paginator.NextPage(context.Background())
if err != nil {
return nil, err
}
for _, value := range output.Contents {
if strings.HasPrefix(*value.Key, prefix) {
parts := strings.Split(*value.Key, "/")
if len(parts) < 2 {
continue
}
found := false
for _, existing := range names {
if existing == parts[1] {
found = true
break
}
}
if !found {
names = append(names, parts[1])
}
}
}
}
return names, nil
}

View File

@@ -1,149 +0,0 @@
package s3
import (
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/caos/zitadel/operator/database/kinds/databases/core"
"github.com/golang/mock/gomock"
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
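// SetQueriedForDatabases builds a queried map pre-populated with a static
// DBList current state for the given databases and users.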
func SetQueriedForDatabases(databases, users []string) map[string]interface{} {
queried := map[string]interface{}{}
core.SetQueriedForDatabaseDBList(queried, databases, users)
return queried
}
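// SetInstantBackup registers the mock expectations for a one-off backup run:
// the three credential secrets are applied, then the backup job is created,
// awaited and deleted again.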
func SetInstantBackup(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
backupName string,
labelsAKID map[string]string,
labelsSAK map[string]string,
labelsST map[string]string,
akid, sak, st string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: accessKeyIDName,
Namespace: namespace,
Labels: labelsAKID,
},
StringData: map[string]string{accessKeyIDKey: akid},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretAccessKeyName,
Namespace: namespace,
Labels: labelsSAK,
},
StringData: map[string]string{secretAccessKeyKey: sak},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: sessionTokenName,
Namespace: namespace,
Labels: labelsST,
},
StringData: map[string]string{sessionTokenKey: st},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().GetJob(namespace, backup.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, backup.GetJobName(backupName)))
k8sClient.EXPECT().WaitUntilJobCompleted(namespace, backup.GetJobName(backupName), gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().DeleteJob(namespace, backup.GetJobName(backupName)).Times(1).Return(nil)
}
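// SetBackup registers the mock expectations for the scheduled backup: the
// three credential secrets are applied and a cron job is created.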
func SetBackup(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
labelsAKID map[string]string,
labelsSAK map[string]string,
labelsST map[string]string,
akid, sak, st string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: accessKeyIDName,
Namespace: namespace,
Labels: labelsAKID,
},
StringData: map[string]string{accessKeyIDKey: akid},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretAccessKeyName,
Namespace: namespace,
Labels: labelsSAK,
},
StringData: map[string]string{secretAccessKeyKey: sak},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: sessionTokenName,
Namespace: namespace,
Labels: labelsST,
},
StringData: map[string]string{sessionTokenKey: st},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplyCronJob(gomock.Any()).Times(1).Return(nil)
}
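// SetRestore registers the mock expectations for a restore run: the three
// credential secrets are applied, then the restore job is created, awaited
// and deleted again.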
func SetRestore(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
backupName string,
labelsAKID map[string]string,
labelsSAK map[string]string,
labelsST map[string]string,
akid, sak, st string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: accessKeyIDName,
Namespace: namespace,
Labels: labelsAKID,
},
StringData: map[string]string{accessKeyIDKey: akid},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretAccessKeyName,
Namespace: namespace,
Labels: labelsSAK,
},
StringData: map[string]string{secretAccessKeyKey: sak},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: sessionTokenName,
Namespace: namespace,
Labels: labelsST,
},
StringData: map[string]string{sessionTokenKey: st},
Type: "Opaque",
}).MinTimes(1).MaxTimes(1).Return(nil)
k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().GetJob(namespace, restore.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, restore.GetJobName(backupName)))
k8sClient.EXPECT().WaitUntilJobCompleted(namespace, restore.GetJobName(backupName), gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().DeleteJob(namespace, restore.GetJobName(backupName)).Times(1).Return(nil)
}

View File

@@ -1,115 +0,0 @@
package restore
import (
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
"github.com/caos/orbos/pkg/labels"
corev1 "k8s.io/api/core/v1"
)
const (
Instant = "restore"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
accessKeyIDPath = "/secrets/accessaccountkey"
secretAccessKeyPath = "/secrets/secretaccesskey"
sessionTokenPath = "/secrets/sessiontoken"
jobPrefix = "backup-"
jobSuffix = "-restore"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout = 15 * time.Minute
)
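// AdaptFunc returns the query and destroy functions for a one-off restore
// job that replays the given backup from the S3 bucket into CockroachDB as
// soon as the database reports ready.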
func AdaptFunc(
monitor mntr.Monitor,
backupName string,
namespace string,
componentLabels *labels.Component,
bucketName string,
timestamp string,
accessKeyIDName string,
accessKeyIDKey string,
secretAccessKeyName string,
secretAccessKeyKey string,
sessionTokenName string,
sessionTokenKey string,
region string,
endpoint string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
checkDBReady operator.EnsureFunc,
dbURL string,
dbPort int32,
image string,
) (
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
err error,
) {
jobName := GetJobName(backupName)
command := getCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
)
jobdef := getJob(
namespace,
labels.MustForName(componentLabels, GetJobName(backupName)),
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
image,
command,
)
destroyJ, err := job.AdaptFuncToDestroy(jobName, namespace)
if err != nil {
return nil, nil, err
}
destroyers := []operator.DestroyFunc{
operator.ResourceDestroyToZitadelDestroy(destroyJ),
}
queryJ, err := job.AdaptFuncToEnsure(jobdef)
if err != nil {
return nil, nil, err
}
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
return operator.QueriersToEnsureFunc(monitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(monitor, destroyers),
nil
}
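// GetJobName returns the deterministic name of the restore job for a backup,
// e.g. "backup-<backupName>-restore".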
func GetJobName(backupName string) string {
return jobPrefix + backupName + jobSuffix
}

View File

@@ -1,196 +0,0 @@
package restore
import (
"github.com/caos/zitadel/operator/common"
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
)
func TestBackup_Adapt1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
namespace := "testNs"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
timestamp := "testTs"
backupName := "testName2"
bucketName := "testBucket2"
version := "testVersion"
accessKeyIDName := "testAKIN"
accessKeyIDKey := "testAKIK"
secretAccessKeyName := "testSAKN"
secretAccessKeyKey := "testSAKK"
sessionTokenName := "testSTN"
sessionTokenKey := "testSTK"
region := "region"
endpoint := "endpoint"
dbURL := "testDB"
dbPort := int32(80)
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "testVersion"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
common.BackupImage.Reference("", version),
getCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
),
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
bucketName,
timestamp,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
region,
endpoint,
nodeselector,
tolerations,
checkDBReady,
dbURL,
dbPort,
common.BackupImage.Reference("", version),
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}
func TestBackup_Adapt2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
namespace := "testNs2"
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
timestamp := "testTs"
backupName := "testName2"
bucketName := "testBucket2"
version := "testVersion2"
accessKeyIDName := "testAKIN2"
accessKeyIDKey := "testAKIK2"
secretAccessKeyName := "testSAKN2"
secretAccessKeyKey := "testSAKK2"
sessionTokenName := "testSTN2"
sessionTokenKey := "testSTK2"
region := "region2"
endpoint := "endpoint2"
dbURL := "testDB"
dbPort := int32(80)
jobName := GetJobName(backupName)
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent2")
nameLabels := labels.MustForName(componentLabels, jobName)
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
return nil
}
jobDef := getJob(
namespace,
nameLabels,
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
common.BackupImage.Reference("", version),
getCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
),
)
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
query, _, err := AdaptFunc(
monitor,
backupName,
namespace,
componentLabels,
bucketName,
timestamp,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
region,
endpoint,
nodeselector,
tolerations,
checkDBReady,
dbURL,
dbPort,
common.BackupImage.Reference("", version),
)
assert.NoError(t, err)
queried := map[string]interface{}{}
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))
}

View File

@@ -1,27 +0,0 @@
package restore
import (
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator"
)
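// GetCleanupFunc returns an EnsureFunc that waits until the restore job has
// completed and then deletes it.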
func GetCleanupFunc(
monitor mntr.Monitor,
namespace,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for restore to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
return fmt.Errorf("error while waiting for restore to be completed: %s", err.Error())
}
monitor.Info("restore is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
return fmt.Errorf("error while trying to cleanup restore: %s", err.Error())
}
monitor.Info("restore cleanup is completed")
return nil
}
}

View File

@@ -1,40 +0,0 @@
package restore
import (
"errors"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Cleanup1(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test"
namespace := "testNs"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
func TestBackup_Cleanup2(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
name := "test2"
namespace := "testNs2"
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@@ -1,48 +0,0 @@
package restore
import (
"strconv"
"strings"
)
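// getCommand assembles the cockroach SQL statement that restores the given
// backup timestamp from the S3 bucket, passing endpoint, region and the
// credentials (read at runtime from the mounted secret files) as query
// parameters of the RESTORE URL.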
func getCommand(
timestamp string,
bucketName string,
backupName string,
certsFolder string,
accessKeyIDPath string,
secretAccessKeyPath string,
sessionTokenPath string,
region string,
endpoint string,
dbURL string,
dbPort int32,
) string {
backupCommands := make([]string, 0)
parameters := []string{
"AWS_ACCESS_KEY_ID=$(cat " + accessKeyIDPath + ")",
"AWS_SECRET_ACCESS_KEY=$(cat " + secretAccessKeyPath + ")",
"AWS_SESSION_TOKEN=$(cat " + sessionTokenPath + ")",
"AWS_ENDPOINT=" + endpoint,
}
if region != "" {
parameters = append(parameters, "AWS_REGION="+region)
}
backupCommands = append(backupCommands,
strings.Join([]string{
"cockroach",
"sql",
"--certs-dir=" + certsFolder,
"--host=" + dbURL,
"--port=" + strconv.Itoa(int(dbPort)),
"-e",
"\"RESTORE FROM \\\"s3://" + bucketName + "/" + backupName + "/" + timestamp + "?" + strings.Join(parameters, "&") + "\\\";\"",
}, " ",
),
)
return strings.Join(backupCommands, " && ")
}

View File

@@ -1,59 +0,0 @@
package restore
import (
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Command1(t *testing.T) {
timestamp := "test1"
bucketName := "testBucket"
backupName := "testBackup"
dbURL := "testDB"
dbPort := int32(80)
region := "region"
endpoint := "endpoint"
cmd := getCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
)
equals := "cockroach sql --certs-dir=" + certPath + " --host=testDB --port=80 -e \"RESTORE FROM \\\"s3://testBucket/testBackup/test1?AWS_ACCESS_KEY_ID=$(cat " + accessKeyIDPath + ")&AWS_SECRET_ACCESS_KEY=$(cat " + secretAccessKeyPath + ")&AWS_SESSION_TOKEN=$(cat " + sessionTokenPath + ")&AWS_ENDPOINT=endpoint&AWS_REGION=region\\\";\""
assert.Equal(t, equals, cmd)
}
func TestBackup_Command2(t *testing.T) {
timestamp := "test2"
bucketName := "testBucket"
backupName := "testBackup"
dbURL := "testDB2"
dbPort := int32(81)
region := "region2"
endpoint := "endpoint2"
cmd := getCommand(
timestamp,
bucketName,
backupName,
certPath,
accessKeyIDPath,
secretAccessKeyPath,
sessionTokenPath,
region,
endpoint,
dbURL,
dbPort,
)
equals := "cockroach sql --certs-dir=" + certPath + " --host=testDB2 --port=81 -e \"RESTORE FROM \\\"s3://testBucket/testBackup/test2?AWS_ACCESS_KEY_ID=$(cat " + accessKeyIDPath + ")&AWS_SECRET_ACCESS_KEY=$(cat " + secretAccessKeyPath + ")&AWS_SESSION_TOKEN=$(cat " + sessionTokenPath + ")&AWS_ENDPOINT=endpoint2&AWS_REGION=region2\\\";\""
assert.Equal(t, equals, cmd)
}

View File

@@ -1,98 +0,0 @@
package restore
import (
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator/helpers"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
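// getJob builds the Kubernetes job that runs the restore command, mounting
// the client certificates and the three credential secrets into the
// container.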
func getJob(
namespace string,
nameLabels *labels.Name,
nodeselector map[string]string,
tolerations []corev1.Toleration,
accessKeyIDName string,
accessKeyIDKey string,
secretAccessKeyName string,
secretAccessKeyKey string,
sessionTokenName string,
sessionTokenKey string,
image string,
command string,
) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: v1.ObjectMeta{
Name: nameLabels.Name(),
Namespace: namespace,
Labels: labels.MustK8sMap(nameLabels),
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
RestartPolicy: corev1.RestartPolicyNever,
Containers: []corev1.Container{{
Name: nameLabels.Name(),
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: accessKeyIDKey,
SubPath: accessKeyIDKey,
MountPath: accessKeyIDPath,
}, {
Name: secretAccessKeyKey,
SubPath: secretAccessKeyKey,
MountPath: secretAccessKeyPath,
}, {
Name: sessionTokenKey,
SubPath: sessionTokenKey,
MountPath: sessionTokenPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: accessKeyIDKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: accessKeyIDName,
},
},
}, {
Name: secretAccessKeyKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretAccessKeyName,
},
},
}, {
Name: sessionTokenKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: sessionTokenName,
},
},
}},
},
},
},
}
}

View File

@@ -1,240 +0,0 @@
package restore
import (
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator/helpers"
"github.com/stretchr/testify/assert"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
)
func TestBackup_Job1(t *testing.T) {
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{
{Key: "testKey", Operator: "testOp"}}
image := "testVersion"
command := "test"
accessKeyIDName := "testAKIN"
accessKeyIDKey := "testAKIK"
secretAccessKeyName := "testSAKN"
secretAccessKeyKey := "testSAKK"
sessionTokenName := "testSTN"
sessionTokenKey := "testSTK"
jobName := "testJob"
namespace := "testNs"
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": jobName,
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testOpVersion",
"caos.ch/apiversion": "testVersion",
"caos.ch/kind": "testKind"}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testOpVersion"), "testKind", "testVersion"), "testComponent")
nameLabels := labels.MustForName(componentLabels, jobName)
equals :=
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: namespace,
Labels: k8sLabels,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: jobName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: accessKeyIDKey,
SubPath: accessKeyIDKey,
MountPath: accessKeyIDPath,
}, {
Name: secretAccessKeyKey,
SubPath: secretAccessKeyKey,
MountPath: secretAccessKeyPath,
}, {
Name: sessionTokenKey,
SubPath: sessionTokenKey,
MountPath: sessionTokenPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: accessKeyIDKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: accessKeyIDName,
},
},
}, {
Name: secretAccessKeyKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretAccessKeyName,
},
},
}, {
Name: sessionTokenKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: sessionTokenName,
},
},
}},
},
},
},
}
assert.Equal(t, equals, getJob(
namespace,
nameLabels,
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
image,
command,
))
}
func TestBackup_Job2(t *testing.T) {
nodeselector := map[string]string{"test2": "test2"}
tolerations := []corev1.Toleration{
{Key: "testKey2", Operator: "testOp2"}}
image := "testVersion2"
command := "test2"
accessKeyIDName := "testAKIN2"
accessKeyIDKey := "testAKIK2"
secretAccessKeyName := "testSAKN2"
secretAccessKeyKey := "testSAKK2"
sessionTokenName := "testSTN2"
sessionTokenKey := "testSTK2"
jobName := "testJob2"
namespace := "testNs2"
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent2",
"app.kubernetes.io/managed-by": "testOp2",
"app.kubernetes.io/name": jobName,
"app.kubernetes.io/part-of": "testProd2",
"app.kubernetes.io/version": "testOpVersion2",
"caos.ch/apiversion": "testVersion2",
"caos.ch/kind": "testKind2"}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testOpVersion2"), "testKind2", "testVersion2"), "testComponent2")
nameLabels := labels.MustForName(componentLabels, jobName)
equals :=
&batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: jobName,
Namespace: namespace,
Labels: k8sLabels,
},
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
NodeSelector: nodeselector,
Tolerations: tolerations,
Containers: []corev1.Container{{
Name: jobName,
Image: image,
Command: []string{
"/bin/bash",
"-c",
command,
},
VolumeMounts: []corev1.VolumeMount{{
Name: internalSecretName,
MountPath: certPath,
}, {
Name: accessKeyIDKey,
SubPath: accessKeyIDKey,
MountPath: accessKeyIDPath,
}, {
Name: secretAccessKeyKey,
SubPath: secretAccessKeyKey,
MountPath: secretAccessKeyPath,
}, {
Name: sessionTokenKey,
SubPath: sessionTokenKey,
MountPath: sessionTokenPath,
}},
ImagePullPolicy: corev1.PullIfNotPresent,
}},
Volumes: []corev1.Volume{{
Name: internalSecretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: rootSecretName,
DefaultMode: helpers.PointerInt32(defaultMode),
},
},
}, {
Name: accessKeyIDKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: accessKeyIDName,
},
},
}, {
Name: secretAccessKeyKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretAccessKeyName,
},
},
}, {
Name: sessionTokenKey,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: sessionTokenName,
},
},
}},
},
},
},
}
assert.Equal(t, equals, getJob(
namespace,
nameLabels,
nodeselector,
tolerations,
accessKeyIDName,
accessKeyIDKey,
secretAccessKeyName,
secretAccessKeyKey,
sessionTokenName,
sessionTokenKey,
image,
command))
}

View File

@@ -1,54 +0,0 @@
package s3
import (
"github.com/caos/orbos/pkg/secret"
)
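// getSecretsMap initializes any nil secret fields in the desired spec and
// returns the secrets and their existing-secret references keyed by their
// canonical names.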
func getSecretsMap(desiredKind *DesiredV0) (map[string]*secret.Secret, map[string]*secret.Existing) {
var (
secrets = make(map[string]*secret.Secret)
existing = make(map[string]*secret.Existing)
)
if desiredKind.Spec == nil {
desiredKind.Spec = &Spec{}
}
if desiredKind.Spec.AccessKeyID == nil {
desiredKind.Spec.AccessKeyID = &secret.Secret{}
}
if desiredKind.Spec.ExistingAccessKeyID == nil {
desiredKind.Spec.ExistingAccessKeyID = &secret.Existing{}
}
akikey := "accesskeyid"
secrets[akikey] = desiredKind.Spec.AccessKeyID
existing[akikey] = desiredKind.Spec.ExistingAccessKeyID
if desiredKind.Spec.SecretAccessKey == nil {
desiredKind.Spec.SecretAccessKey = &secret.Secret{}
}
if desiredKind.Spec.ExistingSecretAccessKey == nil {
desiredKind.Spec.ExistingSecretAccessKey = &secret.Existing{}
}
sakkey := "secretaccesskey"
secrets[sakkey] = desiredKind.Spec.SecretAccessKey
existing[sakkey] = desiredKind.Spec.ExistingSecretAccessKey
if desiredKind.Spec.SessionToken == nil {
desiredKind.Spec.SessionToken = &secret.Secret{}
}
if desiredKind.Spec.ExistingSessionToken == nil {
desiredKind.Spec.ExistingSessionToken = &secret.Existing{}
}
stkey := "sessiontoken"
secrets[stkey] = desiredKind.Spec.SessionToken
existing[stkey] = desiredKind.Spec.ExistingSessionToken
return secrets, existing
}

View File

@@ -1,38 +0,0 @@
package s3
import (
"testing"
"github.com/caos/orbos/pkg/secret"
"github.com/stretchr/testify/assert"
)
func TestBucket_getSecretsFull(t *testing.T) {
secrets, existing := getSecretsMap(&desired)
assert.Equal(t, desired.Spec.AccessKeyID, secrets["accesskeyid"])
assert.Equal(t, desired.Spec.ExistingAccessKeyID, existing["accesskeyid"])
assert.Equal(t, desired.Spec.SecretAccessKey, secrets["secretaccesskey"])
assert.Equal(t, desired.Spec.ExistingSecretAccessKey, existing["secretaccesskey"])
assert.Equal(t, desired.Spec.SessionToken, secrets["sessiontoken"])
assert.Equal(t, desired.Spec.ExistingSessionToken, existing["sessiontoken"])
}
func TestBucket_getSecretsEmpty(t *testing.T) {
secrets, existing := getSecretsMap(&desiredWithoutSecret)
assert.Equal(t, &secret.Secret{}, secrets["accesskeyid"])
assert.Equal(t, &secret.Existing{}, existing["accesskeyid"])
assert.Equal(t, &secret.Secret{}, secrets["secretaccesskey"])
assert.Equal(t, &secret.Existing{}, existing["secretaccesskey"])
assert.Equal(t, &secret.Secret{}, secrets["sessiontoken"])
assert.Equal(t, &secret.Existing{}, existing["sessiontoken"])
}
func TestBucket_getSecretsNil(t *testing.T) {
secrets, existing := getSecretsMap(&desiredNil)
assert.Equal(t, &secret.Secret{}, secrets["accesskeyid"])
assert.Equal(t, &secret.Existing{}, existing["accesskeyid"])
assert.Equal(t, &secret.Secret{}, secrets["secretaccesskey"])
assert.Equal(t, &secret.Existing{}, existing["secretaccesskey"])
assert.Equal(t, &secret.Secret{}, secrets["sessiontoken"])
assert.Equal(t, &secret.Existing{}, existing["sessiontoken"])
}

View File

@@ -1,66 +0,0 @@
package core
import (
"crypto/rsa"
"errors"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
)
const queriedName = "database"
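// DatabaseCurrent is the queried runtime state of a database kind: connection
// parameters, readiness check, certificate handling and user and database
// management functions.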
type DatabaseCurrent interface {
GetURL() string
GetPort() string
GetReadyQuery() operator.EnsureFunc
GetCertificateKey() *rsa.PrivateKey
SetCertificateKey(*rsa.PrivateKey)
GetCertificate() []byte
SetCertificate([]byte)
GetAddUserFunc() func(user string) (operator.QueryFunc, error)
GetDeleteUserFunc() func(user string) (operator.DestroyFunc, error)
GetListUsersFunc() func(k8sClient kubernetes.ClientInt) ([]string, error)
GetListDatabasesFunc() func(k8sClient kubernetes.ClientInt) ([]string, error)
}
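// ParseQueriedForDatabase extracts the current database state from the
// queried map and asserts that it fulfills the DatabaseCurrent interface.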
func ParseQueriedForDatabase(queried map[string]interface{}) (DatabaseCurrent, error) {
queriedDB, ok := queried[queriedName]
if !ok {
return nil, errors.New("no current state for database found")
}
currentDBTree, ok := queriedDB.(*tree.Tree)
if !ok {
return nil, errors.New("queried state for database is not a tree")
}
currentDB, ok := currentDBTree.Parsed.(DatabaseCurrent)
if !ok {
return nil, errors.New("current state does not fulfill the DatabaseCurrent interface")
}
return currentDB, nil
}
func SetQueriedForDatabase(queried map[string]interface{}, databaseCurrent *tree.Tree) {
queried[queriedName] = databaseCurrent
}
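// SetQueriedForDatabaseDBList wraps the given databases and users into a
// CurrentDBList tree and stores it as the current database state.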
func SetQueriedForDatabaseDBList(queried map[string]interface{}, databases, users []string) {
currentDBList := &CurrentDBList{
Common: &tree.Common{
Kind: "DBList",
},
Current: &DatabaseCurrentDBList{
Databases: databases,
Users: users,
},
}
currentDBList.Common.OverwriteVersion("V0")
currentDB := &tree.Tree{
Parsed: currentDBList,
}
SetQueriedForDatabase(queried, currentDB)
}

View File

@@ -1,68 +0,0 @@
package core
import (
"crypto/rsa"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
)
var _ DatabaseCurrent = &CurrentDBList{} // compile-time check that CurrentDBList implements DatabaseCurrent
type CurrentDBList struct {
Common *tree.Common `yaml:",inline"`
Current *DatabaseCurrentDBList
}
type DatabaseCurrentDBList struct {
Databases []string
Users []string
}
func (c *CurrentDBList) GetURL() string {
return ""
}
func (c *CurrentDBList) GetPort() string {
return ""
}
func (c *CurrentDBList) GetReadyQuery() operator.EnsureFunc {
return nil
}
func (c *CurrentDBList) GetCertificateKey() *rsa.PrivateKey {
return nil
}
func (c *CurrentDBList) SetCertificateKey(key *rsa.PrivateKey) {}
func (c *CurrentDBList) GetCertificate() []byte {
return nil
}
func (c *CurrentDBList) SetCertificate(cert []byte) {}
func (c *CurrentDBList) GetListDatabasesFunc() func(k8sClient kubernetes.ClientInt) ([]string, error) {
return func(k8sClient kubernetes.ClientInt) ([]string, error) {
return c.Current.Databases, nil
}
}
func (c *CurrentDBList) GetListUsersFunc() func(k8sClient kubernetes.ClientInt) ([]string, error) {
return func(k8sClient kubernetes.ClientInt) ([]string, error) {
return c.Current.Users, nil
}
}
func (c *CurrentDBList) GetAddUserFunc() func(user string) (operator.QueryFunc, error) {
return nil
}
func (c *CurrentDBList) GetDeleteUserFunc() func(user string) (operator.DestroyFunc, error) {
return nil
}

View File

@@ -1,3 +0,0 @@
package core
//go:generate mockgen -source current.go -package coremock -destination mock/current.mock.go github.com/caos/internal/operator/database/kinds/databases/core DatabaseCurrent

View File

@@ -1,186 +0,0 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: current.go
// Package coremock is a generated GoMock package.
package coremock
import (
rsa "crypto/rsa"
kubernetes "github.com/caos/orbos/pkg/kubernetes"
operator "github.com/caos/zitadel/operator"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)
// MockDatabaseCurrent is a mock of DatabaseCurrent interface
type MockDatabaseCurrent struct {
ctrl *gomock.Controller
recorder *MockDatabaseCurrentMockRecorder
}
// MockDatabaseCurrentMockRecorder is the mock recorder for MockDatabaseCurrent
type MockDatabaseCurrentMockRecorder struct {
mock *MockDatabaseCurrent
}
// NewMockDatabaseCurrent creates a new mock instance
func NewMockDatabaseCurrent(ctrl *gomock.Controller) *MockDatabaseCurrent {
mock := &MockDatabaseCurrent{ctrl: ctrl}
mock.recorder = &MockDatabaseCurrentMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockDatabaseCurrent) EXPECT() *MockDatabaseCurrentMockRecorder {
return m.recorder
}
// GetURL mocks base method
func (m *MockDatabaseCurrent) GetURL() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetURL")
ret0, _ := ret[0].(string)
return ret0
}
// GetURL indicates an expected call of GetURL
func (mr *MockDatabaseCurrentMockRecorder) GetURL() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetURL", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetURL))
}
// GetPort mocks base method
func (m *MockDatabaseCurrent) GetPort() string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetPort")
ret0, _ := ret[0].(string)
return ret0
}
// GetPort indicates an expected call of GetPort
func (mr *MockDatabaseCurrentMockRecorder) GetPort() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPort", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetPort))
}
// GetReadyQuery mocks base method
func (m *MockDatabaseCurrent) GetReadyQuery() operator.EnsureFunc {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetReadyQuery")
ret0, _ := ret[0].(operator.EnsureFunc)
return ret0
}
// GetReadyQuery indicates an expected call of GetReadyQuery
func (mr *MockDatabaseCurrentMockRecorder) GetReadyQuery() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReadyQuery", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetReadyQuery))
}
// GetCertificateKey mocks base method
func (m *MockDatabaseCurrent) GetCertificateKey() *rsa.PrivateKey {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCertificateKey")
ret0, _ := ret[0].(*rsa.PrivateKey)
return ret0
}
// GetCertificateKey indicates an expected call of GetCertificateKey
func (mr *MockDatabaseCurrentMockRecorder) GetCertificateKey() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCertificateKey", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetCertificateKey))
}
// SetCertificateKey mocks base method
func (m *MockDatabaseCurrent) SetCertificateKey(arg0 *rsa.PrivateKey) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetCertificateKey", arg0)
}
// SetCertificateKey indicates an expected call of SetCertificateKey
func (mr *MockDatabaseCurrentMockRecorder) SetCertificateKey(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCertificateKey", reflect.TypeOf((*MockDatabaseCurrent)(nil).SetCertificateKey), arg0)
}
// GetCertificate mocks base method
func (m *MockDatabaseCurrent) GetCertificate() []byte {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetCertificate")
ret0, _ := ret[0].([]byte)
return ret0
}
// GetCertificate indicates an expected call of GetCertificate
func (mr *MockDatabaseCurrentMockRecorder) GetCertificate() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCertificate", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetCertificate))
}
// SetCertificate mocks base method
func (m *MockDatabaseCurrent) SetCertificate(arg0 []byte) {
m.ctrl.T.Helper()
m.ctrl.Call(m, "SetCertificate", arg0)
}
// SetCertificate indicates an expected call of SetCertificate
func (mr *MockDatabaseCurrentMockRecorder) SetCertificate(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCertificate", reflect.TypeOf((*MockDatabaseCurrent)(nil).SetCertificate), arg0)
}
// GetAddUserFunc mocks base method
func (m *MockDatabaseCurrent) GetAddUserFunc() func(string) (operator.QueryFunc, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetAddUserFunc")
ret0, _ := ret[0].(func(string) (operator.QueryFunc, error))
return ret0
}
// GetAddUserFunc indicates an expected call of GetAddUserFunc
func (mr *MockDatabaseCurrentMockRecorder) GetAddUserFunc() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddUserFunc", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetAddUserFunc))
}
// GetDeleteUserFunc mocks base method
func (m *MockDatabaseCurrent) GetDeleteUserFunc() func(string) (operator.DestroyFunc, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetDeleteUserFunc")
ret0, _ := ret[0].(func(string) (operator.DestroyFunc, error))
return ret0
}
// GetDeleteUserFunc indicates an expected call of GetDeleteUserFunc
func (mr *MockDatabaseCurrentMockRecorder) GetDeleteUserFunc() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDeleteUserFunc", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetDeleteUserFunc))
}
// GetListUsersFunc mocks base method
func (m *MockDatabaseCurrent) GetListUsersFunc() func(kubernetes.ClientInt) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetListUsersFunc")
ret0, _ := ret[0].(func(kubernetes.ClientInt) ([]string, error))
return ret0
}
// GetListUsersFunc indicates an expected call of GetListUsersFunc
func (mr *MockDatabaseCurrentMockRecorder) GetListUsersFunc() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetListUsersFunc", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetListUsersFunc))
}
// GetListDatabasesFunc mocks base method
func (m *MockDatabaseCurrent) GetListDatabasesFunc() func(kubernetes.ClientInt) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetListDatabasesFunc")
ret0, _ := ret[0].(func(kubernetes.ClientInt) ([]string, error))
return ret0
}
// GetListDatabasesFunc indicates an expected call of GetListDatabasesFunc
func (mr *MockDatabaseCurrentMockRecorder) GetListDatabasesFunc() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetListDatabasesFunc", reflect.TypeOf((*MockDatabaseCurrent)(nil).GetListDatabasesFunc))
}

View File

@@ -1,76 +0,0 @@
package databases
import (
"fmt"
core "k8s.io/api/core/v1"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/database/kinds/databases/managed"
"github.com/caos/zitadel/operator/database/kinds/databases/provided"
)
const (
component = "database"
)
func ComponentSelector() *labels.Selector {
return labels.OpenComponentSelector("ZITADEL", component)
}
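// Adapt dispatches to the adapter matching the desired database kind.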
func Adapt(
monitor mntr.Monitor,
desiredTree *tree.Tree,
currentTree *tree.Tree,
namespace string,
apiLabels *labels.API,
timestamp string,
nodeselector map[string]string,
tolerations []core.Toleration,
version string,
features []string,
customImageRegistry string,
) (
query operator.QueryFunc,
destroy operator.DestroyFunc,
configure operator.ConfigureFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
componentLabels := labels.MustForComponent(apiLabels, component)
internalMonitor := monitor.WithField("component", component)
switch desiredTree.Common.Kind {
case "databases.caos.ch/CockroachDB":
return managed.Adapter(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features, customImageRegistry)(internalMonitor, desiredTree, currentTree)
case "databases.caos.ch/ProvidedDatabase":
return provided.Adapter()(internalMonitor, desiredTree, currentTree)
default:
return nil, nil, nil, nil, nil, false, mntr.ToUserError(fmt.Errorf("unknown database kind %s: %w", desiredTree.Common.Kind, err))
}
}
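// GetBackupList returns the backup names for the desired database kind; only
// the managed CockroachDB kind supports backups.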
func GetBackupList(
monitor mntr.Monitor,
k8sClient kubernetes.ClientInt,
desiredTree *tree.Tree,
) (
[]string,
error,
) {
switch desiredTree.Common.Kind {
case "databases.caos.ch/CockroachDB":
return managed.BackupList()(monitor, k8sClient, desiredTree)
case "databases.caos.ch/ProvidedDatabse":
return nil, mntr.ToUserError(fmt.Errorf("no backups supported for database kind %s", desiredTree.Common.Kind))
default:
return nil, mntr.ToUserError(fmt.Errorf("unknown database kind %s", desiredTree.Common.Kind))
}
}

View File

@@ -1,284 +0,0 @@
package managed
import (
"fmt"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/pdb"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/common"
"github.com/caos/zitadel/operator/database/kinds/backups"
"github.com/caos/zitadel/operator/database/kinds/databases/core"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/certificate"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/rbac"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/services"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/statefulset"
)
const (
SfsName = "cockroachdb"
pdbName = SfsName + "-budget"
serviceAccountName = SfsName
PublicServiceName = SfsName + "-public"
privateServiceName = SfsName
cockroachPort = int32(26257)
cockroachHTTPPort = int32(8080)
Clean = "clean"
DBReady = "dbready"
)
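// Adapter returns the AdaptFunc for the managed CockroachDB: depending on the
// requested features it wires up certificates, RBAC, the statefulset, the
// services, a pod disruption budget, PVC cleanup and the configured backups.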
func Adapter(
componentLabels *labels.Component,
namespace string,
timestamp string,
nodeselector map[string]string,
tolerations []corev1.Toleration,
version string,
features []string,
customImageRegistry string,
) operator.AdaptFunc {
return func(
monitor mntr.Monitor,
desired *tree.Tree,
current *tree.Tree,
) (
_ operator.QueryFunc,
_ operator.DestroyFunc,
_ operator.ConfigureFunc,
_ map[string]*secret.Secret,
_ map[string]*secret.Existing,
migrate bool,
err error,
) {
defer func() {
if err != nil {
err = fmt.Errorf("adapting managed database failed: %w", err)
}
}()
var (
internalMonitor = monitor.WithField("kind", "cockroachdb")
allSecrets = make(map[string]*secret.Secret)
allExisting = make(map[string]*secret.Existing)
)
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, nil, nil, nil, false, fmt.Errorf("parsing desired state failed: %w", err)
}
desired.Parsed = desiredKind
storageCapacity, err := resource.ParseQuantity(desiredKind.Spec.StorageCapacity)
if err != nil {
return nil, nil, nil, nil, nil, false, mntr.ToUserError(fmt.Errorf("parsing storage capacity format failed: %w", err))
}
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
var (
isFeatureDatabase bool
isFeatureClean bool
)
for _, feature := range features {
switch feature {
case "database":
isFeatureDatabase = true
case Clean:
isFeatureClean = true
}
}
queryCert, destroyCert, addUser, deleteUser, listUsers, err := certificate.AdaptFunc(internalMonitor, namespace, componentLabels, desiredKind.Spec.ClusterDns, isFeatureDatabase)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
addRoot, err := addUser("root")
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
destroyRoot, err := deleteUser("root")
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
queryRBAC, destroyRBAC, err := rbac.AdaptFunc(internalMonitor, namespace, labels.MustForName(componentLabels, serviceAccountName))
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
cockroachNameLabels := labels.MustForName(componentLabels, SfsName)
cockroachSelector := labels.DeriveNameSelector(cockroachNameLabels, false)
cockroachSelectable := labels.AsSelectable(cockroachNameLabels)
querySFS, destroySFS, ensureInit, checkDBReady, listDatabases, err := statefulset.AdaptFunc(
internalMonitor,
cockroachSelectable,
cockroachSelector,
desiredKind.Spec.Force,
namespace,
common.CockroachImage.Reference(customImageRegistry),
serviceAccountName,
desiredKind.Spec.ReplicaCount,
storageCapacity,
cockroachPort,
cockroachHTTPPort,
desiredKind.Spec.StorageClass,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
desiredKind.Spec.Resources,
desiredKind.Spec.Cache,
desiredKind.Spec.MaxSQLMemory,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
queryS, destroyS, err := services.AdaptFunc(
internalMonitor,
namespace,
labels.MustForName(componentLabels, PublicServiceName),
labels.MustForName(componentLabels, privateServiceName),
cockroachSelector,
cockroachPort,
cockroachHTTPPort,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
queryPDB, err := pdb.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, pdbName), cockroachSelector, "1")
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
currentDB := &Current{
Common: tree.NewCommon("databases.caos.ch/CockroachDB", "v0", false),
Current: &CurrentDB{
CA: &certificate.Current{},
},
}
current.Parsed = currentDB
var (
queriers = make([]operator.QueryFunc, 0)
destroyers = make([]operator.DestroyFunc, 0)
configurers = make([]operator.ConfigureFunc, 0)
)
if isFeatureDatabase {
queriers = append(queriers,
queryRBAC,
queryCert,
addRoot,
operator.ResourceQueryToZitadelQuery(querySFS),
operator.ResourceQueryToZitadelQuery(queryPDB),
queryS,
operator.EnsureFuncToQueryFunc(ensureInit),
)
destroyers = append(destroyers,
destroyS,
operator.ResourceDestroyToZitadelDestroy(destroySFS),
destroyRBAC,
destroyCert,
destroyRoot,
)
}
if isFeatureClean {
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(
statefulset.CleanPVCs(
monitor,
namespace,
cockroachSelectable,
desiredKind.Spec.ReplicaCount,
),
),
operator.EnsureFuncToQueryFunc(ensureInit),
operator.EnsureFuncToQueryFunc(checkDBReady),
)
}
if desiredKind.Spec.Backups != nil {
oneBackup := false
for backupName := range desiredKind.Spec.Backups {
if timestamp != "" && strings.HasPrefix(timestamp, backupName) {
oneBackup = true
}
}
for backupName, desiredBackup := range desiredKind.Spec.Backups {
currentBackup := &tree.Tree{}
if timestamp == "" || !oneBackup || (timestamp != "" && strings.HasPrefix(timestamp, backupName)) {
queryB, destroyB, configureB, secrets, existing, migrateB, err := backups.Adapt(
internalMonitor,
desiredBackup,
currentBackup,
backupName,
namespace,
componentLabels,
checkDBReady,
strings.TrimPrefix(timestamp, backupName+"."),
nodeselector,
tolerations,
version,
PublicServiceName,
cockroachPort,
features,
customImageRegistry,
)
if err != nil {
return nil, nil, nil, nil, nil, false, err
}
migrate = migrate || migrateB
secret.AppendSecrets(backupName, allSecrets, secrets, allExisting, existing)
destroyers = append(destroyers, destroyB)
queriers = append(queriers, queryB)
configurers = append(configurers, configureB)
}
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queriedCurrentDB, err := core.ParseQueriedForDatabase(queried)
if err != nil || queriedCurrentDB == nil {
// TODO: query system state
currentDB.Current.Port = strconv.Itoa(int(cockroachPort))
currentDB.Current.URL = PublicServiceName
currentDB.Current.ReadyFunc = checkDBReady
currentDB.Current.AddUserFunc = addUser
currentDB.Current.DeleteUserFunc = deleteUser
currentDB.Current.ListUsersFunc = listUsers
currentDB.Current.ListDatabasesFunc = listDatabases
core.SetQueriedForDatabase(queried, current)
internalMonitor.Info("set current state of managed database")
}
ensure, err := operator.QueriersToEnsureFunc(internalMonitor, true, queriers, k8sClient, queried)
return ensure, err
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, gitops bool) error {
for i := range configurers {
if err := configurers[i](k8sClient, queried, gitops); err != nil {
return err
}
}
return nil
},
allSecrets,
allExisting,
migrate,
nil
}
}

View File

@@ -1,172 +0,0 @@
package managed
import (
"testing"
"time"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)
func getTreeWithDBAndBackup(t *testing.T, masterkey string, saJson string, backupName string) *tree.Tree {
bucketDesired := getDesiredTree(t, masterkey, &bucket.DesiredV0{
Common: tree.NewCommon("databases.caos.ch/BucketBackup", "v0", false),
Spec: &bucket.Spec{
Verbose: true,
Cron: "testCron",
Bucket: "testBucket",
ServiceAccountJSON: &secret.Secret{
Value: saJson,
},
},
})
bucketDesiredKind, err := bucket.ParseDesiredV0(bucketDesired)
assert.NoError(t, err)
bucketDesired.Parsed = bucketDesiredKind
return getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/CockroachDB", "v0", false),
Spec: Spec{
Verbose: false,
ReplicaCount: 1,
StorageCapacity: "368Gi",
StorageClass: "testSC",
NodeSelector: map[string]string{},
ClusterDns: "testDns",
Backups: map[string]*tree.Tree{backupName: bucketDesired},
},
})
}
func TestManaged_AdaptBucketBackup(t *testing.T) {
monitor := mntr.Monitor{}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")
backupLabels := map[string]string{
"app.kubernetes.io/component": "backup",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
namespace := "testNs"
timestamp := "testTs"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{}
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
backupName := "testBucket"
saJson := "testSA"
masterkey := "testMk"
version := "test"
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
features := []string{backup.Normal}
bucket.SetBackup(k8sClient, namespace, backupLabels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
query, _, _, _, _, _, err := Adapter(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features, "")(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := bucket.SetQueriedForDatabases(databases, []string{})
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
func TestManaged_AdaptBucketInstantBackup(t *testing.T) {
monitor := mntr.Monitor{}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")
backupLabels := map[string]string{
"app.kubernetes.io/component": "backup",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
namespace := "testNs"
timestamp := "testTs"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{}
masterkey := "testMk"
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
saJson := "testSA"
backupName := "testBucket"
version := "test"
features := []string{backup.Instant}
bucket.SetInstantBackup(k8sClient, namespace, backupName, backupLabels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
query, _, _, _, _, _, err := Adapter(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features, "")(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
queried := bucket.SetQueriedForDatabases(databases, []string{})
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
func TestManaged_AdaptBucketCleanAndRestore(t *testing.T) {
monitor := mntr.Monitor{}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")
backupLabels := map[string]string{
"app.kubernetes.io/component": "backup",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
namespace := "testNs"
timestamp := "testTs"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{}
version := "testVersion"
masterkey := "testMk"
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
saJson := "testSA"
backupName := "testBucket"
features := []string{restore.Instant}
bucket.SetRestore(k8sClient, namespace, backupName, backupLabels, saJson)
//SetClean(k8sClient, namespace, 1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(1)
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
query, _, _, _, _, _, err := Adapter(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features, "")(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
users := []string{"test1", "test2"}
queried := bucket.SetQueriedForDatabases(databases, users)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}

View File

@@ -1,250 +0,0 @@
package managed
import (
"testing"
"time"
"gopkg.in/yaml.v3"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
coremock "github.com/caos/zitadel/operator/database/kinds/databases/core/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
func getDesiredTree(t *testing.T, masterkey string, desired interface{}) *tree.Tree {
secret.Masterkey = masterkey
desiredTree := &tree.Tree{}
data, err := yaml.Marshal(desired)
assert.NoError(t, err)
assert.NoError(t, yaml.Unmarshal(data, desiredTree))
return desiredTree
}
func TestManaged_Adapt1(t *testing.T) {
monitor := mntr.Monitor{}
nodeLabels := map[string]string{
"app.kubernetes.io/component": "database",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb.node",
"app.kubernetes.io/part-of": "testProd",
"orbos.ch/selectable": "yes",
}
cockroachLabels := map[string]string{
"app.kubernetes.io/component": "database",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb-budget",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "testKind",
}
cockroachSelectorLabels := map[string]string{
"app.kubernetes.io/component": "database",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb",
"app.kubernetes.io/part-of": "testProd",
"orbos.ch/selectable": "yes",
}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")
namespace := "testNs"
timestamp := "testTs"
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{}
features := []string{"database"}
masterkey := "testMk"
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))
queried := map[string]interface{}{}
version := "test"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/CockroachDB", "v0", false),
Spec: Spec{
Verbose: false,
ReplicaCount: 1,
StorageCapacity: "368Gi",
StorageClass: "testSC",
NodeSelector: map[string]string{},
ClusterDns: "testDns",
},
})
unav := intstr.FromInt(1)
k8sClient.EXPECT().ApplyPodDisruptionBudget(&policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "cockroachdb-budget",
Namespace: namespace,
Labels: cockroachLabels,
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: cockroachSelectorLabels,
},
MaxUnavailable: &unav,
},
})
secretList := &corev1.SecretList{
Items: []corev1.Secret{},
}
k8sClient.EXPECT().ApplyService(gomock.Any()).Times(3)
k8sClient.EXPECT().ApplyServiceAccount(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyRole(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyClusterRole(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyRoleBinding(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyClusterRoleBinding(gomock.Any()).Times(1)
// the StatefulSet is applied exactly once
k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).Times(1)
// wait until the pods are running before setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).Times(1)
// not yet ready during setup (short timeout)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).Times(1)
// ready after setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(1)
// client certificate handling
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
// node certificate handling
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
query, _, _, _, _, _, err := Adapter(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features, "")(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
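// The wiring above is standard gomock: every interaction the adapter may
// perform is declared as an expectation before Adapter runs, and the
// controller fails the test on any unexpected or missing call. A minimal
// self-contained sketch of the pattern (hypothetical Doer interface, not
// part of this repository):
//
//	ctrl := gomock.NewController(t)
//	m := NewMockDoer(ctrl)               // generated by mockgen
//	m.EXPECT().Do("x").Times(1).Return(nil)
//	assert.NoError(t, m.Do("x"))         // satisfies the expectation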
func TestManaged_Adapt2(t *testing.T) {
monitor := mntr.Monitor{}
namespace := "testNs"
timestamp := "testTs"
nodeLabels := map[string]string{
"app.kubernetes.io/component": "database2",
"app.kubernetes.io/managed-by": "testOp2",
"app.kubernetes.io/name": "cockroachdb.node",
"app.kubernetes.io/part-of": "testProd2",
"orbos.ch/selectable": "yes",
}
cockroachLabels := map[string]string{
"app.kubernetes.io/component": "database2",
"app.kubernetes.io/managed-by": "testOp2",
"app.kubernetes.io/name": "cockroachdb-budget",
"app.kubernetes.io/part-of": "testProd2",
"app.kubernetes.io/version": "testVersion2",
"caos.ch/apiversion": "v1",
"caos.ch/kind": "testKind2",
}
cockroachSelectorLabels := map[string]string{
"app.kubernetes.io/component": "database2",
"app.kubernetes.io/managed-by": "testOp2",
"app.kubernetes.io/name": "cockroachdb",
"app.kubernetes.io/part-of": "testProd2",
"orbos.ch/selectable": "yes",
}
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "v1"), "database2")
nodeselector := map[string]string{"test2": "test2"}
var tolerations []corev1.Toleration
features := []string{"database"}
masterkey := "testMk2"
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))
queried := map[string]interface{}{}
version := "test"
desired := getDesiredTree(t, masterkey, &DesiredV0{
Common: tree.NewCommon("databases.caos.ch/CockroachDB", "v0", false),
Spec: Spec{
Verbose: false,
ReplicaCount: 1,
StorageCapacity: "368Gi",
StorageClass: "testSC",
NodeSelector: map[string]string{},
ClusterDns: "testDns",
},
})
unav := intstr.FromInt(1)
k8sClient.EXPECT().ApplyPodDisruptionBudget(&policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "cockroachdb-budget",
Namespace: namespace,
Labels: cockroachLabels,
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: cockroachSelectorLabels,
},
MaxUnavailable: &unav,
},
})
secretList := &corev1.SecretList{
Items: []corev1.Secret{},
}
k8sClient.EXPECT().ApplyService(gomock.Any()).Times(3)
k8sClient.EXPECT().ApplyServiceAccount(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyRole(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyClusterRole(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyRoleBinding(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplyClusterRoleBinding(gomock.Any()).Times(1)
// the StatefulSet is applied exactly once
k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).Times(1)
// wait until the pods are running before setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).Times(1)
// not yet ready during setup (short timeout)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).Times(1)
// ready after setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(1)
// client certificate handling
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
// node certificate handling
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
query, _, _, _, _, _, err := Adapter(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features, "")(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
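// Note: both tests build the PodDisruptionBudget against policy/v1beta1,
// which Kubernetes removed in v1.25; the identical object compiles against
// policy/v1 (available since v1.21) with only the import changed:
//
//	policyv1 "k8s.io/api/policy/v1"
//
//	unav := intstr.FromInt(1)
//	pdb := &policyv1.PodDisruptionBudget{
//		Spec: policyv1.PodDisruptionBudgetSpec{MaxUnavailable: &unav},
//	}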

View File

@@ -1,91 +0,0 @@
package certificate
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/certificate/client"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/certificate/node"
)
var (
nodeSecret = "cockroachdb.node"
)
func AdaptFunc(
monitor mntr.Monitor,
namespace string,
componentLabels *labels.Component,
clusterDns string,
generateNodeIfNotExists bool,
) (
operator.QueryFunc,
operator.DestroyFunc,
func(user string) (operator.QueryFunc, error),
func(user string) (operator.DestroyFunc, error),
func(k8sClient kubernetes.ClientInt) ([]string, error),
error,
) {
cMonitor := monitor.WithField("type", "certificates")
queryNode, destroyNode, err := node.AdaptFunc(
cMonitor,
namespace,
labels.MustForName(componentLabels, nodeSecret),
clusterDns,
generateNodeIfNotExists,
)
if err != nil {
return nil, nil, nil, nil, nil, err
}
queriers := []operator.QueryFunc{
queryNode,
}
destroyers := []operator.DestroyFunc{
destroyNode,
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
return operator.QueriersToEnsureFunc(cMonitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(cMonitor, destroyers),
func(user string) (operator.QueryFunc, error) {
query, _, err := client.AdaptFunc(
cMonitor,
namespace,
componentLabels,
)
if err != nil {
return nil, err
}
queryClient := query(user)
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
_, err := queryNode(k8sClient, queried)
if err != nil {
return nil, err
}
return queryClient(k8sClient, queried)
}, nil
},
func(user string) (operator.DestroyFunc, error) {
_, destroy, err := client.AdaptFunc(
cMonitor,
namespace,
componentLabels,
)
if err != nil {
return nil, err
}
return destroy(user), nil
},
func(k8sClient kubernetes.ClientInt) ([]string, error) {
return client.QueryCertificates(namespace, labels.DeriveComponentSelector(componentLabels, false), k8sClient)
},
nil
}
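// AdaptFunc composes behaviour out of closures: queriers and destroyers
// are collected into slices and folded into single funcs by the operator
// package (operator.QueriersToEnsureFunc above). The fold is, in essence
// (simplified signatures, a sketch rather than the actual implementation):
//
//	func fold(queriers []operator.QueryFunc) operator.QueryFunc {
//		return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
//			ensurers := make([]operator.EnsureFunc, 0, len(queriers))
//			for _, q := range queriers {
//				e, err := q(k8sClient, queried)
//				if err != nil {
//					return nil, err
//				}
//				ensurers = append(ensurers, e)
//			}
//			return func(k8sClient kubernetes.ClientInt) error {
//				for _, e := range ensurers {
//					if err := e(k8sClient); err != nil {
//						return err
//					}
//				}
//				return nil
//			}, nil
//		}
//	}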

View File

@@ -1,305 +0,0 @@
package certificate
import (
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/databases/core"
coremock "github.com/caos/zitadel/operator/database/kinds/databases/core/mock"
"github.com/caos/zitadel/operator/database/kinds/databases/managed/certificate/pem"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
)
const (
caPem = `-----BEGIN CERTIFICATE-----
MIIE9TCCAt2gAwIBAgICB+MwDQYJKoZIhvcNAQELBQAwKzESMBAGA1UEChMJQ29j
a3JvYWNoMRUwEwYDVQQDEwxDb2Nrcm9hY2ggQ0EwHhcNMjAxMjAxMTAyMjA0WhcN
MzAxMjAxMTAyMjA0WjArMRIwEAYDVQQKEwlDb2Nrcm9hY2gxFTATBgNVBAMTDENv
Y2tyb2FjaCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAOja5IXJ
GUY9sFgyvWkav+O74gzcv8y69uJzSOx0piOP+sfpZWVeGEjqO4JgdcS5NPMrT5Tb
aJ52CjiOdlHVTyR87i5JmOIvA2qA4dWQmSbX7AQ8r9ptYDe9xMn+qFegcbr4YxCz
K9mmbDZUhlLO7cz3QV6nvRxGFWbzffo8BXZnOUCAOyOHrbnPpLumnfZlL5BckdtY
pS7jAlUpKSMBTK4AHcmrouFsNKHqlUopYXeJFdg9g1F0DuCnVP9x7+XcUW8dAVut
Q7Jswy+++GAXs6mPVsYFLXUSYNyW+Bfl/jKwx0XTQx/6iyNpK0XtAzjBZFjXwmot
0mODkqnfE3BB4lXxZ5knomAQEGSScUhCUb9upbF4uJJF27xr/kIkwtWxMGpCXds0
IxI+wNRCenhfFZEIQCzri0zn6WdN8b/gbv1BErNcccYwolPUv1oUgYbzowbQ5O2D
aLQPqO1VAZiZHLxb787bRywpCl33VZ1ptMHi2ogKjcsh4DQ9SsRj+rU/Tk5lyk7G
FHteyHcq12TGpz9/CQYMacl8yeRRHfNO3Rq0jFTYeD4+ZdVBPKeuTHyXGzy2T157
pgqMFzwqxlNYPzpuz7xsZBExJzCtcomB8fMlsCJnxV/kuMTTsWsPrRc7hsqzBCq3
plfYT8a7EBCVmJmDu+6mMVh+A9M2zZCqtV57AgMBAAGjIzAhMA4GA1UdDwEB/wQE
AwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQDRmkBYDk/F
lYTgMaL7yhRAowDR8mtOl06Oba/MCDDsmBvfOVIqY9Sa9sJtb78Uvecv9WAE4qpb
PNSaIvNmujmGQrBxtGwQdeHZvtXQ1lKAGBM+ukfz/NFU+6rwOdTuLgS5jTaiM4r1
uMoIG6S5z/brWGHAw8Qv2Fq05dSLnevmPWK903V/RnwmCKT9as5Qpfkaie2PNory
euxVGeGolxzgbSps3ZJlDSSymQBl10iJYYdGIsgcw5FHcCdpS4AutdpQbIFuCGk2
CHTcTTa4KMaVfRm/gKm1gh8UnuVDLBKQS8+1CHFYUnih8ozBNUyBo5r5L4BHJK/n
f0gnrYqaxtqPAyHkfo0PRc50HlAQRS/gW8ESv2PQmcSWlDggEzXt5MqFIYOfEXWE
gtC7Ct1P7gzIRxolYVsNSgBR62sJM1PUa39E5v0QJsmuM51txHuw/PGqkkBlEwx9
4u3FFXD9Pslg/g1l50Ek5O3/TWMta4Q+NagDDaOdkb4LRQr522ySgGIinBw00Xr+
Ics2L42rP1LQyaE7+BljDEwmGaT7mrl8QGZR9uv/9mbtk2+q7wAOQzcMSFFACfwx
KdKENmK0GpQZ6HaeDZO7wxGiAyFKhTNpd39XNOEhaKpWLSUxnZTLGlMIzvSyMGCj
PV3xm+Xg1Xvki7f/qC1djysMcaokoJzNdg==
-----END CERTIFICATE-----
`
caPrivPem = `-----BEGIN RSA PRIVATE KEY-----
MIIJKAIBAAKCAgEA2Owc3IOljq3hkKO9q6jT1kFZp/+Y9dv/lTIbsMhV7gPrn2WS
Os2KQphcrB9DvGoZAxUh1aD8MUO7USIMhFodQEq7vfycDT16jrTnc1fSDDcfk3Ra
DvqZGcBkj0lc6w5LK3FNl2y6rA/26teGbyNaVXJfW01dKKy4E2or2+1+tUamyjiU
cvPzNWEoPPJT5HcadLTxZr/SKDvDXFyej4nKT9+j1pKmaqQrIrL4KXKq75LhU+kH
L4TG+kJ35isiv5OTpd2jG6ssz0i+ZEtX4hjIM6eCnZaiFT//33nSL3zl2LUjmyou
ks2FDuX9m90mg8UtcrpA/eVlwyg8nJ/d7/Yn3ZjuHVOgzE8YuQXauJ16HuLcFgEW
WQg1uwhOhrc5b0YJndZbJ2Re4qfaBoC5fODRXoUPvqG9k9kNp98xtclNCIyW8EpA
Su8QmK42ksOJox+OQamHzatrGppgIK77TY3ZFcBHETHClabgsfjv/1GNXXAexJI4
Cjntnou+yoc+LUj87WJfD3ERGgm0nDfnE7uZ7kccO5Lj/oajeO+Q+QJaLZ4I2jz+
a05k16naGGT29AnM+iwBqqoTODr4Z7905niZc6+fEOPml1V7wSuJs2eE4jOa3ixX
5tnruw74rN82Zfrkg6kWPOEfBBXzSotRiHv+BAV2tFbnC55ItnHn1ZE68V0CAwEA
AQKCAgEAssWsZ4PLXpIo8qYve5hQtSP4er7oVb8wnMnGDmSchOMQPbZc1D9uscGV
pnjBvzcFVAgHcWMSVJuIda4E+NK3hrPQlBvqk/LV3WRz1xhKUKzhRgm+6tdWc+We
OoRwonuOMchX9PKzyXgCu7pR3agaG499zOYuX4Yw0jdO3BqXsVf/v2rv1Oj9yEFB
AzGHOCN8VzCEPnTaAzR1pdnjB1K8vCUIhp8nrX2M2zT51lbdT0ISl6/VrzDTN46t
97AXHCHIrgrCENx6un4uAsQhMoHQBNoJiEyLWc371zYzpdVeK8HlDUyvQ2dDQGsF
Hn4c7r4C3aloRJbYzgSMJ1yNcOTCJpciQsq1VmCQFOHfbum2ejquXJ7BbeRGeHdM
145epckL+hbECTCpSs0Z5t90NdfoJspvr+3sOEt6h3DMUGjvobrf/s3KiRY5hHdc
x86Htx3QgWNCG6a+ga7h7s3t+4ZtoPPWn+vlAoxD/2eCzsDgE/XLDmC2T4yS8UOb
LIb4UN7wl2sNM8zhm4BfoiKfjGu4dPZIlsPP+ZKRby1O4ogHHoPREBTH1VSEplVM
fA/KSITV+rUfO3T/qXIFZ4/Wa5YZoULiMCOJOWNgXQzWvTf7Qr31LhhfXd+uIw30
LDtjdkpT43zlKxRosQFLiV7q3fVbvKPVQfxzBz7M1Gl74IllpmECggEBAOnAimKl
w/9mcMkEd2L2ZzDI+57W8DU6grUUILZfjJG+uvOiMmIA5M1wzsVHil3ihZthrJ5A
UUAKG97JgwjpIAliAzgtPZY2AK/bJz7Hht1EwewrV+RyHM+dMv6em2OA+XUjFSPX
VsecFDaUQebkypw236KdA4jgRG6hr4xObdXgTN6RFCv1mI1EijZ/YbKgPgTJaPI4
b2N5QokYFygUCwRxKIt7/Z4hQs9LbdW680NcXtPRPnS1SmwYJbi7wTX+o5f6nfNV
YvojborjXwNrZe0p+FfaEuD44wf6kNlRGfcKXoaAncXV/M5/oXf1dktKP630eq9g
0MAKFYJ6MAUheakCggEBAO2Rgfy/tEydBuP9BIfC84BXk8EwnRJ0ZfmXv77HFj3F
VT5QNU97I5gZJOFkL7OO8bL0/WljlqeBEht0DmHNmOt5Pyew4LRLpfeN6rClZgRN
V4wqKXjoZAIYa9khQcwwFNER1RdI+PkuusJtrvY6CbwwG9LbBq2NR4C1YSgaQnhV
NqdXK5dwrYEky6lI31sDD4BYeiJVKlkkNCQAVOC+04Mrsa9F0NG7TKXzji5hU8l5
x8squjvJ6vmobhmsRTL1LMpafUrt5pHL9jcWIZYxJJo9mB0zxJKcsLI8IOg2QPoj
tQ395FZ2YtjNzZa1CYeUOUiaQu+uvztfE36AdW/vUpUCggEAMV7bW66LURw/4hUx
ahOFBAbPLmNTZMqw5LIVnq9br0TLk73ESnLJ4KJc6coMbXv0oDbnEJ2hC5eW/10s
cetbOuAasfjMMzfAuWPeTCI0V/O3ybv12mhHsYoQRTsWstOA3L7GLkXDLHHIyyZR
LQVRzeDBJ0Vmg7hqe7tmqom+JRg05CVcT1SWHfBGCPCqn+G8d6Jaqh5FWIs6BF60
NWDWWt/TonJTxNxdkg7qaeQMkUOnO7HMMTZBO8d14Ci3zEG2J9llFwoH17E4Hdmc
Lcq3QnpE27lRl3a57Ot9QIkipMzp3hq4OBrURIEsh3uuuoQ6IvGqH/Sg4o6+sEpC
bjL90QKCAQBDc/0kdooK9sruEPkoUwIwfq1FPThb9RC/PYcD9CMshssdVkjMuHny
xbDjDj89DGk0FrudINm11b/+a4Vp36Z7tYFpE5+5kYEeOP1aCpxcvFkPQyljWxiK
P8TfccHs5/oBIr8OTXnjxpDgg6QZ5YC+HirIQ8gxntuef+GGMW6OHCPYf7ew2B1r
fbcV6csBXG0aVATZmrTbepwTXMS8y3Hi3JUm3vvbkQLCW9US9i+EFT/VP9yA/WPq
Xxhj0bYUMej1y5unmsTMwMy392Cx9GIgKTz3jatStYq2ELyHMmBgpaLSxjP/GL4Y
MNce42hBRqS9KI+43jUN9oDiejbeAWXBAoIBACZKnS6EppNDSdos0f32J5TxEUxv
lF9AuVXEAPDR/m3+PlSRzrlf7uowdHTfsemHMSduL8weLNhduprwz4TmW9Fo6fSF
UePLNbXcMX3omAk+AKKOiexLG0fCXGW2zr4nHZJzTbK2+La3yLeUcoPu1puNpLiq
LVj2bH3zKWVRA9/ovuN6V5w18ojjdqOw4bw5qXcdZhoWLxI8Q9Oqua24f/dnRpuI
I8mRtPQ3+vuOKbTT+/80eAUpSfEKwAg1Mjgury9q1/B4Ib6hAGzpJuXxG7xQjnsJ
EFcN1kvdg5WGK41+fYMdexPaLamjhDGN0e1vxJfAukWIAsBMwp8wfEWZvzA=
-----END RSA PRIVATE KEY-----
`
)
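// pem.DecodeCertificate and pem.DecodeKey in this package wrap the
// standard library; a minimal equivalent for the fixtures above (sketch,
// error handling elided; the key block parses as PKCS#1 because it is an
// "RSA PRIVATE KEY" block):
//
//	import (
//		"crypto/x509"
//		stdpem "encoding/pem"
//	)
//
//	block, _ := stdpem.Decode([]byte(caPem))
//	cert, _ := x509.ParseCertificate(block.Bytes)
//	keyBlock, _ := stdpem.Decode([]byte(caPrivPem))
//	key, _ := x509.ParsePKCS1PrivateKey(keyBlock.Bytes)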
func TestCertificate_AdaptWithCA(t *testing.T) {
monitor := mntr.Monitor{}
namespace := "testNs"
clusterDns := "testDns"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "cockroachdb", "v0"), "cockroachdb")
nodeLabels := map[string]string{
"app.kubernetes.io/component": "cockroachdb",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb.node",
"app.kubernetes.io/part-of": "testProd",
"orbos.ch/selectable": "yes",
}
dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
secretList := &corev1.SecretList{
Items: []corev1.Secret{},
}
ca, err := pem.DecodeCertificate([]byte(caPem))
assert.NoError(t, err)
caPriv, err := pem.DecodeKey([]byte(caPrivPem))
assert.NoError(t, err)
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(ca)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(caPriv)
dbCurrent.EXPECT().SetCertificate(ca).Times(1)
dbCurrent.EXPECT().SetCertificateKey(caPriv).Times(1)
queried := map[string]interface{}{}
current := &tree.Tree{
Parsed: dbCurrent,
}
core.SetQueriedForDatabase(queried, current)
query, _, _, _, _, err := AdaptFunc(monitor, namespace, componentLabels, clusterDns, true)
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
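// Contrast with TestNode_AdaptWithoutCA below: because the mocked current
// state already returns a CA pair here, no ApplySecret call is expected;
// the adapter reuses the existing certificate instead of generating one.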
func TestNode_AdaptWithoutCA(t *testing.T) {
monitor := mntr.Monitor{}
namespace := "testNs"
clusterDns := "testDns"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "cockroachdb", "v0"), "cockroachdb")
nodeLabels := map[string]string{
"app.kubernetes.io/component": "cockroachdb",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb.node",
"app.kubernetes.io/part-of": "testProd",
"orbos.ch/selectable": "yes",
}
dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
secretList := &corev1.SecretList{
Items: []corev1.Secret{},
}
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1).Return(nil)
queried := map[string]interface{}{}
current := &tree.Tree{
Parsed: dbCurrent,
}
core.SetQueriedForDatabase(queried, current)
query, _, _, _, _, err := AdaptFunc(monitor, namespace, componentLabels, clusterDns, true)
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
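// With no CA found in the cluster and generateNodeIfNotExists set, the
// node adapter mints a fresh CA, which is why SetCertificate,
// SetCertificateKey and ApplySecret are all expected above. A minimal
// self-signed-CA sketch with the standard library (template values are
// illustrative, not the operator's; imports: crypto/rand, crypto/rsa,
// crypto/x509, crypto/x509/pkix, math/big, time):
//
//	priv, _ := rsa.GenerateKey(rand.Reader, 4096)
//	tmpl := &x509.Certificate{
//		SerialNumber:          big.NewInt(1),
//		Subject:               pkix.Name{Organization: []string{"Cockroach"}},
//		NotBefore:             time.Now(),
//		NotAfter:              time.Now().AddDate(10, 0, 0),
//		IsCA:                  true,
//		KeyUsage:              x509.KeyUsageCertSign,
//		BasicConstraintsValid: true,
//	}
//	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &priv.PublicKey, priv)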
func TestNode_AdaptAlreadyExisting(t *testing.T) {
monitor := mntr.Monitor{}
namespace := "testNs"
clusterDns := "testDns"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "cockroachdb", "v0"), "cockroachdb")
nodeLabels := map[string]string{
"app.kubernetes.io/component": "cockroachdb",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb.node",
"app.kubernetes.io/part-of": "testProd",
"orbos.ch/selectable": "yes",
}
dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
caCertKey := "ca.crt"
caPrivKeyKey := "ca.key"
secretList := &corev1.SecretList{
Items: []corev1.Secret{{
ObjectMeta: metav1.ObjectMeta{},
Data: map[string][]byte{
caCertKey: []byte(caPem),
caPrivKeyKey: []byte(caPrivPem),
},
Type: "Opaque",
}},
}
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
queried := map[string]interface{}{}
current := &tree.Tree{
Parsed: dbCurrent,
}
core.SetQueriedForDatabase(queried, current)
query, _, _, _, _, err := AdaptFunc(monitor, namespace, componentLabels, clusterDns, true)
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}
func TestNode_AdaptCreateUser(t *testing.T) {
monitor := mntr.Monitor{}
clusterDns := "testDns"
namespace := "testNs"
user := "test"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "cockroachdb", "v0"), "cockroachdb")
nodeLabels := map[string]string{
"app.kubernetes.io/component": "cockroachdb",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "cockroachdb.node",
"app.kubernetes.io/part-of": "testProd",
"orbos.ch/selectable": "yes",
}
dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
ca, err := pem.DecodeCertificate([]byte(caPem))
assert.NoError(t, err)
caPriv, err := pem.DecodeKey([]byte(caPrivPem))
assert.NoError(t, err)
caCertKey := "ca.crt"
caPrivKeyKey := "ca.key"
secretList := &corev1.SecretList{
Items: []corev1.Secret{{
ObjectMeta: metav1.ObjectMeta{},
Data: map[string][]byte{
caCertKey: []byte(caPem),
caPrivKeyKey: []byte(caPrivPem),
},
Type: "Opaque",
}},
}
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().Times(1).Return(ca)
dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(caPriv)
dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
k8sClient.EXPECT().ApplySecret(gomock.Any())
queried := map[string]interface{}{}
current := &tree.Tree{
Parsed: dbCurrent,
}
core.SetQueriedForDatabase(queried, current)
_, _, createUser, _, _, err := AdaptFunc(monitor, namespace, componentLabels, clusterDns, true)
assert.NoError(t, err)
query, err := createUser(user)
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
assert.NoError(t, ensure(k8sClient))
}

Some files were not shown because too many files have changed in this diff.