fix: operator picks (#1463)

* feat(crd): add crd mode for operators (#1329)

* feat(operator): add base for zitadel operator

* fix(operator): changed pipeline to release operator

* fix(operator): fmt with only one parameter

* fix(operator): corrected workflow job name

* fix(zitadelctl): added restore and backuplist command

* fix(zitadelctl): scale for restore

* chore(container): use scratch for deploy container

* fix(zitadelctl): limit image to scratch

* fix(migration): added migration scripts for newer version

* fix(operator): changed handling of kubeconfig in operator logic

* fix(operator): changed handling of secrets in operator logic

* fix(operator): use new version of zitadel

* fix(operator): added path for migrations

* fix(operator): delete doublets of migration scripts

* fix(operator): delete subpaths and integrate logic into init container

* fix(operator): corrected path in dockerfile for local migrations

* fix(operator): added migrations for cockroachdb-secure

* fix(operator): delete logic for ambassador module

* fix(operator): added read and write secret commands

* fix(operator): correct and align operator pipeline with zitadel pipeline

* fix(operator): correct yaml error in operator pipeline

* fix(operator): correct action name in operator pipeline

* fix(operator): correct case-sensitive filename in operator pipeline

* fix(operator): upload artifacts from buildx output

* fix(operator): corrected attribute spelling error

* fix(operator): combined jobs for operator binary and image

* fix(operator): added missing comma in operator pipeline

* fix(operator): added codecov for operator image

* fix(operator): added codecov for operator image

* fix(testing): code changes for testing and several unit-tests (#1009)

* fix(operator): usage of interface of kubernetes client for testing and several unit-tests

* fix(operator): several unit-tests

* fix(operator): several unit-tests

* fix(operator): changed order for the operator logic

* fix(operator): added version of zitadelctl from semantic release

* fix(operator): corrected function call with version of zitadelctl

* fix(operator): corrected function call with version of zitadelctl

* fix(operator): add check output to operator release pipeline

* fix(operator): set --short length everywhere to 12

* fix(operator): zitadel setup in job instead of exec with several unit tests

* fix(operator): fixes to combine newest zitadel and testing branch

* fix(operator): corrected path in Dockerfile

* fix(operator): fixed unit-test that was ignored during changes

* fix(operator): fixed unit-test that was ignored during changes

* fix(operator): corrected Dockerfile to correctly use env variable

* fix(operator): quickfix takeoff deployment

* fix(operator): corrected the clusterrolename in the applied artifacts

* fix: update secure migrations

* fix(operator): migrations (#1057)

* fix(operator): copied migrations from orbos repository

* fix(operator): newest migrations

* chore: use cockroach-secure

* fix: rename migration

* fix: remove insecure cockroach migrations

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: finalize labels

* fix(operator): cli logging concurrent and fixed deployment of operator during restore

* fix: finalize labels and cli commands

* fix: restore

* chore: cockroachdb is always secure

* chore: use orbos consistent-labels latest commit

* test: make tests compatible with new labels

* fix: default to sa token for start command

* fix: use cockroachdb v12.02

* fix: don't delete flyway user

* test: fix migration test

* fix: use correct table qualifiers

* fix: don't alter sequence ownership

* fix: upgrade flyway

* fix: change ownership of all dbs and tables to admin user

* fix: change defaultdb user

* fix: treat clientid status codes >= 400 as errors

* fix: reconcile specified ZITADEL version, not binary version

* fix: add ca-certs

* fix: use latest orbos code

* fix: use orbos with fixed race condition

* fix: use latest ORBOS code

* fix: use latest ORBOS code

* fix: make migration and scaling around restoring work

* fix(operator): move zitadel operator

* chore(migrations): include owner change migration

* feat(db): add code base for database operator

* fix(db): change used image registry for database operator

* fix(db): generated mock

* fix(db): add accidentally ignored file

* fix(db): add cockroachdb backup image to pipeline

* fix(db): correct pipeline and image versions

* fix(db): correct version of used orbos

* fix(db): correct database import

* fix(db): go mod tidy

* fix(db): use new version for orbos

* fix(migrations): include migrations into zitadelctl binary (#1211)

* fix(db): use statik to integrate migrations into binary

* fix(migrations): corrections unit tests and pipeline for integrated migrations into zitadelctl binary

* fix(migrations): correction in dockerfile for pipeline build

* fix(migrations): correction in dockerfile for pipeline build

* fix(migrations):  dockerfile changes for cache optimization

* fix(database): correct used part-of label in database operator

* fix(database): correct used selectable label in zitadel operator

* fix(operator): correct labels for user secrets in zitadel operator

* fix(operator): correct labels for service test in zitadel operator

* fix: don't enable database features for user operations (#1227)

* fix: don't enable database features for user operations

* fix: omit database feature for connection info adapter

* fix: use latest orbos version

* fix(crd): corrected logic to get database connection and other info

* fix(crd): corrected yaml tags and start for zitadel operator

* fix(crd): move some dependencies and use consistent structure

* fix(crd): corrected unit-tests

* fix(crd): corrected main files for debug starts

* chore(pipeline): use correct version for zitadelctl build

* fix(crd): correct calculating of current db state for zitadel operator

* fix(crd): use binary version for deployment of crd mode operators

* fix(crd): add gitops attribute for reconciling

* fix(crd): corrected crd with newest version

* fix(migration): collect cleanup functions and only use them if all jobs are successful

* fix(zitadelctl): import gcp auth to connect to gke cluster

* feat: Add read and writesecret options for crd mode (#1435)

* fix: don't require orbconfig for crd mode

* test: pass

* fix(zitadelctl): import gcp auth to connect to gke cluster

* feat: add read and writesecret option for crd mode

* test: fix

* fix: make all crd secrets writable

* fix: use in-cluster configs for in-cluster operators

* chore: remove unnecessary debug files

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: Crdoperatormerge review (#1385)

* fix: don't require orbconfig for crd mode

* test: pass

* fix(zitadelctl): import gcp auth to connect to gke cluster

* fix: ensure caos-system namespace

* fix: apply orbconfig at takeoff

* docs: improve help for creating an orbconfig

* docs: describe orbconfig properties

* docs: add --gitops to help message example

* fix(pipeline): correct upload of artifacts in dev releases

* test: pass

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix(test): corrected falsely merged tests

* chore: update orbos library

* fix: only handle exactly named and namespaced crd resource

* fix: print errors, check correct crd namespace

* fix: validate bucket secret

* chore: compile

* fix(operator): corrected secret handling when unused secrets are not defined

* fix(operator): corrected handling of jobs

* fix: dont print logs when readsecret path is provided

* fix(operator): corrected handling of jobs and sort for mounted volumes

* fix(operator): sort for volumes

* fix(operator): change orbos import to newest release

Co-authored-by: Florian Forster <florian@caos.ch>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>

(cherry picked from commit fa9bd5a8e7)

* fix(operator): Standard timeout handling (#1458)

* fix: always use standard time.Duration

* fix: give backup and restore more time

* fix: give backup and restore jobs more time

(cherry picked from commit 7468b7d1e8)

* fix go mod

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
This commit is contained in:
Livio Amstutz 2021-03-24 10:31:19 +01:00 committed by GitHub
parent e7c11cb9e2
commit bacfc3b099
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
105 changed files with 3431 additions and 1324 deletions

View File

@ -265,19 +265,18 @@ jobs:
if: steps.semantic.outputs.new_release_published != 'true' && needs.refs.outputs.short_ref != 'master' && needs.refs.outputs.short_ref != ''
- name: Dev-Release
id: create_release
uses: softprops/action-gh-release@v1
uses: ncipollo/release-action@v1.8.1
if: steps.semantic.outputs.new_release_published != 'true' && needs.refs.outputs.short_ref != 'master' && needs.refs.outputs.short_ref != ''
env:
GITHUB_TOKEN: ${{ env.GITHUB_TOKEN }}
with:
tag_name: ${{ needs.refs.outputs.short_ref }}-dev
name: Branch ${{ needs.refs.outputs.short_ref }}
draft: false
prerelease: true
artifacts: "${{ env.ARTIFACTS_FOLDER }}/zitadelctl-darwin-amd64/zitadelctl-darwin-amd64,${{ env.ARTIFACTS_FOLDER }}/zitadelctl-linux-amd64/zitadelctl-linux-amd64,${{ env.ARTIFACTS_FOLDER }}/zitadelctl-windows-amd64/zitadelctl-windows-amd64.exe"
body: |
This is a release from a development branch.
Do not use these artifacts in production.
files: |
${{ env.ARTIFACTS_FOLDER }}/zitadelctl-darwin-amd64/zitadelctl-darwin-amd64
${{ env.ARTIFACTS_FOLDER }}/zitadelctl-linux-amd64/zitadelctl-linux-amd64
${{ env.ARTIFACTS_FOLDER }}/zitadelctl-windows-amd64/zitadelctl-windows-amd64.exe
tag: ${{ needs.refs.outputs.short_ref }}-dev
commit: ${{ needs.refs.outputs.short_ref }}
name: Branch ${{ needs.refs.outputs.short_ref }}
token: ${{ env.GITHUB_TOKEN }}
replacesArtifacts: true
prerelease: true
draft: false
allowUpdates: true

View File

@ -1,50 +0,0 @@
package main
import (
"flag"
"io/ioutil"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/start"
"github.com/caos/orbos/mntr"
"github.com/caos/zitadel/operator/helpers"
)
func main() {
orbconfig := flag.String("orbconfig", "~/.orb/config", "The orbconfig file to use")
kubeconfig := flag.String("kubeconfig", "~/.kube/config", "The kubeconfig file to use")
verbose := flag.Bool("verbose", false, "Print debug levelled logs")
flag.Parse()
monitor := mntr.Monitor{
OnInfo: mntr.LogMessage,
OnChange: mntr.LogMessage,
OnError: mntr.LogError,
}
if *verbose {
monitor = monitor.Verbose()
}
kc, err := ioutil.ReadFile(helpers.PruneHome(*kubeconfig))
if err != nil {
panic(err)
}
if err := start.Database(
monitor,
helpers.PruneHome(*orbconfig),
kubernetes.NewK8sClient(monitor, strPtr(string(kc))),
strPtr("database-development"),
); err != nil {
panic(err)
}
}
func strPtr(str string) *string {
return &str
}

View File

@ -1,47 +0,0 @@
package main
import (
"flag"
"io/ioutil"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/helpers"
"github.com/caos/zitadel/operator/start"
)
func main() {
orbconfig := flag.String("orbconfig", "~/.orb/config", "The orbconfig file to use")
kubeconfig := flag.String("kubeconfig", "~/.kube/config", "The kubeconfig file to use")
verbose := flag.Bool("verbose", false, "Print debug levelled logs")
flag.Parse()
monitor := mntr.Monitor{
OnInfo: mntr.LogMessage,
OnChange: mntr.LogMessage,
OnError: mntr.LogError,
}
if *verbose {
monitor = monitor.Verbose()
}
kc, err := ioutil.ReadFile(helpers.PruneHome(*kubeconfig))
if err != nil {
panic(err)
}
if err := start.Operator(
monitor,
helpers.PruneHome(*orbconfig),
kubernetes.NewK8sClient(monitor, strPtr(string(kc))),
strPtr("local-debugging"),
); err != nil {
panic(err)
}
}
func strPtr(str string) *string {
return &str
}

View File

@ -1,14 +1,15 @@
package cmds
import (
"io/ioutil"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/api"
"github.com/caos/zitadel/operator/start"
"github.com/caos/zitadel/operator/crtlgitops"
"github.com/spf13/cobra"
"io/ioutil"
)
func BackupCommand(rv RootValues) *cobra.Command {
func BackupCommand(getRv GetRootValues) *cobra.Command {
var (
kubeconfig string
backup string
@ -24,14 +25,19 @@ func BackupCommand(rv RootValues) *cobra.Command {
flags.StringVar(&backup, "backup", "", "Name used for backup folder")
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
_, monitor, orbConfig, gitClient, version, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
monitor := rv.Monitor
orbConfig := rv.OrbConfig
gitClient := rv.GitClient
version := rv.Version
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
return err
}
@ -55,7 +61,7 @@ func BackupCommand(rv RootValues) *cobra.Command {
k8sClient := kubernetes.NewK8sClient(monitor, &kubeconfigStr)
if k8sClient.Available() {
if err := start.Backup(
if err := crtlgitops.Backup(
monitor,
orbConfig.Path,
k8sClient,

View File

@ -8,7 +8,7 @@ import (
"github.com/spf13/cobra"
)
func BackupListCommand(rv RootValues) *cobra.Command {
func BackupListCommand(getRv GetRootValues) *cobra.Command {
var (
cmd = &cobra.Command{
Use: "backuplist",
@ -18,14 +18,18 @@ func BackupListCommand(rv RootValues) *cobra.Command {
)
cmd.RunE = func(cmd *cobra.Command, args []string) error {
_, monitor, orbConfig, gitClient, _, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
monitor := rv.Monitor
orbConfig := rv.OrbConfig
gitClient := rv.GitClient
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
monitor.Error(err)
return nil

View File

@ -3,12 +3,15 @@ package cmds
import (
"os"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/kubernetes/cli"
"github.com/caos/zitadel/operator/secrets"
"github.com/caos/orbos/pkg/secret"
"github.com/spf13/cobra"
)
func ReadSecretCommand(rv RootValues) *cobra.Command {
func ReadSecretCommand(getRv GetRootValues) *cobra.Command {
return &cobra.Command{
Use: "readsecret [path]",
Short: "Print a secrets decrypted value to stdout",
@ -16,32 +19,33 @@ func ReadSecretCommand(rv RootValues) *cobra.Command {
Args: cobra.MaximumNArgs(1),
Example: `zitadelctl readsecret zitadel.emailappkey > ~/emailappkey`,
RunE: func(cmd *cobra.Command, args []string) error {
_, monitor, orbConfig, gitClient, _, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
return err
}
if err := gitClient.Clone(); err != nil {
return err
}
monitor := rv.Monitor
orbConfig := rv.OrbConfig
gitClient := rv.GitClient
path := ""
if len(args) > 0 {
path = args[0]
}
k8sClient, _, err := cli.Client(monitor, orbConfig, gitClient, rv.Kubeconfig, rv.Gitops)
if err != nil && !rv.Gitops {
return err
}
value, err := secret.Read(
monitor,
gitClient,
k8sClient,
path,
secrets.GetAllSecretsFunc(orbConfig))
secrets.GetAllSecretsFunc(monitor, path == "", rv.Gitops, gitClient, k8sClient, orbConfig),
)
if err != nil {
monitor.Error(err)
return nil

View File

@ -4,19 +4,20 @@ import (
"errors"
"io/ioutil"
"github.com/caos/zitadel/operator/crtlgitops"
"github.com/caos/zitadel/operator/helpers"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/start"
"github.com/caos/zitadel/pkg/databases"
"github.com/manifoldco/promptui"
"github.com/spf13/cobra"
)
func RestoreCommand(rv RootValues) *cobra.Command {
func RestoreCommand(getRv GetRootValues) *cobra.Command {
var (
backup string
kubeconfig string
gitOpsMode bool
cmd = &cobra.Command{
Use: "restore",
Short: "Restore from backup",
@ -27,16 +28,22 @@ func RestoreCommand(rv RootValues) *cobra.Command {
flags := cmd.Flags()
flags.StringVar(&backup, "backup", "", "Backup used for db restore")
flags.StringVar(&kubeconfig, "kubeconfig", "~/.kube/config", "Kubeconfig for ZITADEL operator deployment")
flags.BoolVar(&gitOpsMode, "gitops", false, "defines if the operator should run in gitops mode")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
_, monitor, orbConfig, gitClient, version, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
monitor := rv.Monitor
orbConfig := rv.OrbConfig
gitClient := rv.GitClient
version := rv.Version
kubeconfig = helpers.PruneHome(kubeconfig)
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
@ -89,7 +96,7 @@ func RestoreCommand(rv RootValues) *cobra.Command {
return nil
}
if err := start.Restore(monitor, gitClient, orbConfig, k8sClient, backup, &version); err != nil {
if err := crtlgitops.Restore(monitor, gitClient, orbConfig, k8sClient, backup, gitOpsMode, &version); err != nil {
monitor.Error(err)
}
return nil

View File

@ -2,6 +2,7 @@ package cmds
import (
"context"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/orb"
@ -9,64 +10,92 @@ import (
"github.com/spf13/cobra"
)
type RootValues func() (context.Context, mntr.Monitor, *orb.Orb, *git.Client, string, errFunc, error)
type RootValues struct {
Ctx context.Context
Monitor mntr.Monitor
Version string
Gitops bool
OrbConfig *orb.Orb
GitClient *git.Client
Kubeconfig string
ErrFunc errFunc
}
type GetRootValues func() (*RootValues, error)
type errFunc func(err error) error
func RootCommand(version string) (*cobra.Command, RootValues) {
func RootCommand(version string) (*cobra.Command, GetRootValues) {
var (
verbose bool
orbConfigPath string
)
cmd := &cobra.Command{
Use: "zitadelctl [flags]",
Short: "Interact with your IAM orbs",
Long: `zitadelctl launches zitadel and simplifies common tasks such as updating your kubeconfig.
Participate in our community on https://github.com/caos/orbos
and visit our website at https://caos.ch`,
Example: `$ mkdir -p ~/.orb
$ cat > ~/.orb/myorb << EOF
> url: git@github.com:me/my-orb.git
> masterkey: "$(gopass my-secrets/orbs/myorb/masterkey)"
> repokey: |
> $(cat ~/.ssh/myorbrepo | sed s/^/\ \ /g)
> EOF
$ orbctl -f ~/.orb/myorb [command]
`,
}
flags := cmd.PersistentFlags()
flags.StringVarP(&orbConfigPath, "orbconfig", "f", "~/.orb/config", "Path to the file containing the orbs git repo URL, deploy key and the master key for encrypting and decrypting secrets")
flags.BoolVar(&verbose, "verbose", false, "Print debug levelled logs")
return cmd, func() (context.Context, mntr.Monitor, *orb.Orb, *git.Client, string, errFunc, error) {
monitor := mntr.Monitor{
ctx = context.Background()
monitor = mntr.Monitor{
OnInfo: mntr.LogMessage,
OnChange: mntr.LogMessage,
OnError: mntr.LogError,
}
rv = &RootValues{
Ctx: ctx,
Version: version,
ErrFunc: func(err error) error {
if err != nil {
monitor.Error(err)
}
return nil
},
}
orbConfigPath string
verbose bool
)
cmd := &cobra.Command{
Use: "zitadelctl [flags]",
Short: "Interact with your IAM orbs",
Long: `zitadelctl launches zitadel and simplifies common tasks such as deploying operators or reading and writing secrets.
Participate in our community on https://github.com/caos/orbos
and visit our website at https://caos.ch`,
Example: `$ # For being able to use the --gitops flag, you need to create an orbconfig and add an SSH deploy key to your github project
$ # Create an ssh key pair
$ ssh-keygen -b 2048 -t rsa -f ~/.ssh/myorbrepo -q -N ""
$ # Create the orbconfig
$ mkdir -p ~/.orb
$ cat > ~/.orb/myorb << EOF
> # this is the ssh URL to your git repository
> url: git@github.com:me/my-orb.git
> masterkey: "$(openssl rand -base64 21)" # used for encrypting and decrypting secrets
> # the repokey is used to connect to your git repository
> repokey: |
> $(cat ~/.ssh/myorbrepo | sed s/^/\ \ /g)
> EOF
$ zitadelctl --gitops -f ~/.orb/myorb [command]
`,
}
flags := cmd.PersistentFlags()
flags.BoolVar(&rv.Gitops, "gitops", false, "Run orbctl in gitops mode. Not specifying this flag is only supported for BOOM and Networking Operator")
flags.StringVarP(&orbConfigPath, "orbconfig", "f", "~/.orb/config", "Path to the file containing the orbs git repo URL, deploy key and the master key for encrypting and decrypting secrets")
flags.StringVarP(&rv.Kubeconfig, "kubeconfig", "k", "~/.kube/config", "Path to the kubeconfig file to the cluster orbctl should target")
flags.BoolVar(&verbose, "verbose", false, "Print debug levelled logs")
return cmd, func() (*RootValues, error) {
if verbose {
monitor = monitor.Verbose()
}
prunedPath := helpers.PruneHome(orbConfigPath)
orbConfig, err := orb.ParseOrbConfig(prunedPath)
if err != nil {
orbConfig = &orb.Orb{Path: prunedPath}
return nil, mntr.Monitor{}, nil, nil, "", nil, err
rv.Monitor = monitor
rv.Kubeconfig = helpers.PruneHome(rv.Kubeconfig)
rv.GitClient = git.New(ctx, monitor, "orbos", "orbos@caos.ch")
if rv.Gitops {
prunedPath := helpers.PruneHome(orbConfigPath)
orbConfig, err := orb.ParseOrbConfig(prunedPath)
if err != nil {
orbConfig = &orb.Orb{Path: prunedPath}
return nil, err
}
rv.OrbConfig = orbConfig
}
ctx := context.Background()
return ctx, monitor, orbConfig, git.New(ctx, monitor, "orbos", "orbos@caos.ch"), version, func(err error) error {
if err != nil {
monitor.Error(err)
}
return nil
}, nil
return rv, nil
}
}

View File

@ -1,81 +1,95 @@
package cmds
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/helpers"
"github.com/caos/zitadel/operator/start"
"github.com/caos/orbos/pkg/kubernetes/cli"
"github.com/caos/zitadel/operator/crtlcrd"
"github.com/caos/zitadel/operator/crtlgitops"
"github.com/spf13/cobra"
)
func StartOperator(rv RootValues) *cobra.Command {
func StartOperator(getRv GetRootValues) *cobra.Command {
var (
kubeconfig string
cmd = &cobra.Command{
metricsAddr string
cmd = &cobra.Command{
Use: "operator",
Short: "Launch a ZITADEL operator",
Long: "Ensures a desired state of ZITADEL",
}
)
flags := cmd.Flags()
flags.StringVar(&kubeconfig, "kubeconfig", "", "Kubeconfig for ZITADEL operator deployment")
flags.StringVar(&metricsAddr, "metrics-addr", "", "The address the metric endpoint binds to.")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
_, monitor, orbConfig, _, version, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
kubeconfig = helpers.PruneHome(kubeconfig)
monitor := rv.Monitor
orbConfig := rv.OrbConfig
version := rv.Version
k8sClient, err := kubernetes.NewK8sClientWithPath(monitor, kubeconfig)
if err != nil {
monitor.Error(err)
return nil
}
if rv.Gitops {
k8sClient, _, err := cli.Client(monitor, orbConfig, rv.GitClient, rv.Kubeconfig, rv.Gitops)
if err != nil {
return err
}
if k8sClient.Available() {
if err := start.Operator(monitor, orbConfig.Path, k8sClient, &version); err != nil {
monitor.Error(err)
return nil
return crtlgitops.Operator(monitor, orbConfig.Path, k8sClient, &version, rv.Gitops)
} else {
if err := crtlcrd.Start(monitor, version, metricsAddr, crtlcrd.Zitadel); err != nil {
return err
}
}
return nil
}
return cmd
}
func StartDatabase(rv RootValues) *cobra.Command {
func StartDatabase(getRv GetRootValues) *cobra.Command {
var (
kubeconfig string
cmd = &cobra.Command{
kubeconfig string
metricsAddr string
cmd = &cobra.Command{
Use: "database",
Short: "Launch a database operator",
Long: "Ensures a desired state of the database",
}
)
flags := cmd.Flags()
flags.StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig used by zitadel operator")
flags.StringVar(&kubeconfig, "kubeconfig", "", "kubeconfig used by database operator")
flags.StringVar(&metricsAddr, "metrics-addr", "", "The address the metric endpoint binds to.")
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
_, monitor, orbConfig, _, version, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
k8sClient, err := kubernetes.NewK8sClientWithPath(monitor, kubeconfig)
if err != nil {
return err
monitor := rv.Monitor
orbConfig := rv.OrbConfig
version := rv.Version
if rv.Gitops {
k8sClient, _, err := cli.Client(monitor, orbConfig, rv.GitClient, rv.Kubeconfig, rv.Gitops)
if err != nil {
return err
}
return crtlgitops.Database(monitor, orbConfig.Path, k8sClient, &version, rv.Gitops)
} else {
if err := crtlcrd.Start(monitor, version, metricsAddr, crtlcrd.Database); err != nil {
return err
}
}
if k8sClient.Available() {
return start.Database(monitor, orbConfig.Path, k8sClient, &version)
}
return nil
}
return cmd

View File

@ -1,23 +1,25 @@
package cmds
import (
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"io/ioutil"
"github.com/caos/orbos/pkg/kubernetes/cli"
"github.com/ghodss/yaml"
"github.com/caos/zitadel/operator/helpers"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/api"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
orbzit "github.com/caos/zitadel/operator/zitadel/kinds/orb"
"github.com/spf13/cobra"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
func TakeoffCommand(rv RootValues) *cobra.Command {
func TakeoffCommand(getRv GetRootValues) *cobra.Command {
var (
kubeconfig string
cmd = &cobra.Command{
gitOpsZitadel bool
gitOpsDatabase bool
cmd = &cobra.Command{
Use: "takeoff",
Short: "Launch a ZITADEL operator on the orb",
Long: "Ensures a desired state of the resources on the orb",
@ -25,39 +27,57 @@ func TakeoffCommand(rv RootValues) *cobra.Command {
)
flags := cmd.Flags()
flags.StringVar(&kubeconfig, "kubeconfig", "~/.kube/config", "Kubeconfig for ZITADEL operator deployment")
flags.BoolVar(&gitOpsZitadel, "gitops-zitadel", false, "defines if the zitadel operator should run in gitops mode")
flags.BoolVar(&gitOpsDatabase, "gitops-database", false, "defines if the database operator should run in gitops mode")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
_, monitor, orbConfig, gitClient, _, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
kubeconfig = helpers.PruneHome(kubeconfig)
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
monitor.Error(err)
return nil
}
monitor := rv.Monitor
orbConfig := rv.OrbConfig
gitClient := rv.GitClient
if err := gitClient.Clone(); err != nil {
monitor.Error(err)
return nil
}
value, err := ioutil.ReadFile(kubeconfig)
k8sClient, _, err := cli.Client(
monitor,
orbConfig,
gitClient,
rv.Kubeconfig,
gitOpsZitadel || gitOpsDatabase,
)
if err != nil {
monitor.Error(err)
return nil
return err
}
if err := kubernetes.EnsureCaosSystemNamespace(monitor, k8sClient); err != nil {
monitor.Info("failed to apply common resources into k8s-cluster")
return err
}
if gitOpsZitadel || gitOpsDatabase {
orbConfigBytes, err := yaml.Marshal(orbConfig)
if err != nil {
return err
}
if err := kubernetes.EnsureOrbconfigSecret(monitor, k8sClient, orbConfigBytes); err != nil {
monitor.Info("failed to apply configuration resources into k8s-cluster")
return err
}
}
kubeconfigStr := string(value)
if err := deployOperator(
monitor,
gitClient,
&kubeconfigStr,
k8sClient,
rv.Version,
rv.Gitops || gitOpsZitadel,
); err != nil {
monitor.Error(err)
}
@ -65,7 +85,9 @@ func TakeoffCommand(rv RootValues) *cobra.Command {
if err := deployDatabase(
monitor,
gitClient,
&kubeconfigStr,
k8sClient,
rv.Version,
rv.Gitops || gitOpsDatabase,
); err != nil {
monitor.Error(err)
}
@ -74,53 +96,85 @@ func TakeoffCommand(rv RootValues) *cobra.Command {
return cmd
}
func deployOperator(monitor mntr.Monitor, gitClient *git.Client, kubeconfig *string) error {
found, err := api.ExistsZitadelYml(gitClient)
if err != nil {
return err
}
if !found {
monitor.Info("No ZITADEL operator deployed as no zitadel.yml present")
return nil
}
func deployOperator(monitor mntr.Monitor, gitClient *git.Client, k8sClient kubernetes.ClientInt, version string, gitops bool) error {
if gitops {
found, err := api.ExistsZitadelYml(gitClient)
if err != nil {
return err
}
if found {
if found {
k8sClient := kubernetes.NewK8sClient(monitor, kubeconfig)
if k8sClient.Available() {
desiredTree, err := api.ReadZitadelYml(gitClient)
if err != nil {
return err
}
if err := orb.Reconcile(monitor, desiredTree, true)(k8sClient); err != nil {
return err
}
}
}
return nil
}
func deployDatabase(monitor mntr.Monitor, gitClient *git.Client, kubeconfig *string) error {
found, err := api.ExistsDatabaseYml(gitClient)
if err != nil {
return err
}
if found {
k8sClient := kubernetes.NewK8sClient(monitor, kubeconfig)
if k8sClient.Available() {
tree, err := api.ReadDatabaseYml(gitClient)
desired, err := orbzit.ParseDesiredV0(desiredTree)
if err != nil {
return err
}
spec := desired.Spec
spec.GitOps = gitops
if err := orbdb.Reconcile(
monitor,
tree)(k8sClient); err != nil {
// at takeoff the artifacts have to be applied
spec.SelfReconciling = true
if err := orbzit.Reconcile(monitor, spec)(k8sClient); err != nil {
return err
}
} else {
monitor.Info("Failed to connect to k8s")
}
} else {
// at takeoff the artifacts have to be applied
spec := &orbzit.Spec{
Version: version,
SelfReconciling: true,
GitOps: gitops,
}
if err := orbzit.Reconcile(monitor, spec)(k8sClient); err != nil {
return err
}
}
return nil
}
func deployDatabase(monitor mntr.Monitor, gitClient *git.Client, k8sClient kubernetes.ClientInt, version string, gitops bool) error {
if gitops {
found, err := api.ExistsDatabaseYml(gitClient)
if err != nil {
return err
}
if found {
desiredTree, err := api.ReadDatabaseYml(gitClient)
if err != nil {
return err
}
desired, err := orbdb.ParseDesiredV0(desiredTree)
if err != nil {
return err
}
spec := desired.Spec
spec.GitOps = gitops
// at takeoff the artifacts have to be applied
spec.SelfReconciling = true
if err := orbdb.Reconcile(
monitor,
spec)(k8sClient); err != nil {
return err
}
}
} else {
// at takeoff the artifacts have to be applied
spec := &orbdb.Spec{
Version: version,
SelfReconciling: true,
GitOps: gitops,
}
if err := orbdb.Reconcile(
monitor,
spec)(k8sClient); err != nil {
return err
}
}
return nil

View File

@ -2,15 +2,18 @@ package cmds
import (
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/caos/orbos/pkg/kubernetes/cli"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/secrets"
"github.com/spf13/cobra"
)
func WriteSecretCommand(rv RootValues) *cobra.Command {
func WriteSecretCommand(getRv GetRootValues) *cobra.Command {
var (
value string
@ -32,43 +35,43 @@ orbctl writesecret mygceprovider.google_application_credentials_value --value "$
flags.BoolVar(&stdin, "stdin", false, "Value to encrypt is read from standard input")
cmd.RunE = func(cmd *cobra.Command, args []string) error {
_, monitor, orbConfig, gitClient, _, errFunc, err := rv()
rv, err := getRv()
if err != nil {
return err
}
defer func() {
err = errFunc(err)
err = rv.ErrFunc(err)
}()
monitor := rv.Monitor
orbConfig := rv.OrbConfig
gitClient := rv.GitClient
s, err := key(value, file, stdin)
if err != nil {
monitor.Error(err)
return nil
}
if err := gitClient.Configure(orbConfig.URL, []byte(orbConfig.Repokey)); err != nil {
monitor.Error(err)
return nil
}
if err := gitClient.Clone(); err != nil {
monitor.Error(err)
return nil
}
path := ""
if len(args) > 0 {
path = args[0]
}
k8sClient, _, err := cli.Client(monitor, orbConfig, gitClient, rv.Kubeconfig, rv.Gitops)
if err != nil && !rv.Gitops {
return err
}
if err := secret.Write(
monitor,
gitClient,
k8sClient,
path,
s,
secrets.GetAllSecretsFunc(orbConfig),
secrets.PushFunc(),
"zitadelctl",
fmt.Sprintf(rv.Version),
secrets.GetAllSecretsFunc(monitor, path != "", rv.Gitops, gitClient, k8sClient, orbConfig),
secrets.PushFunc(monitor, rv.Gitops, gitClient, k8sClient),
); err != nil {
monitor.Error(err)
}

15
go.mod
View File

@ -17,7 +17,7 @@ require (
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc
github.com/caos/logging v0.0.2
github.com/caos/oidc v0.14.3
github.com/caos/orbos v1.5.14-0.20210302165604-744ecfd88280
github.com/caos/orbos v1.5.14-0.20210323181340-093f4c8a30f9
github.com/cockroachdb/cockroach-go/v2 v2.1.0
github.com/duo-labs/webauthn v0.0.0-20200714211715-1daaee874e43
github.com/envoyproxy/protoc-gen-validate v0.4.1
@ -54,7 +54,7 @@ require (
github.com/rakyll/statik v0.1.7
github.com/rs/cors v1.7.0
github.com/sony/sonyflake v1.0.0
github.com/spf13/cobra v0.0.7
github.com/spf13/cobra v1.1.1
github.com/stretchr/testify v1.7.0
github.com/ttacon/builder v0.0.0-20170518171403-c099f663e1c2 // indirect
github.com/ttacon/libphonenumber v1.1.0
@ -66,7 +66,6 @@ require (
go.opentelemetry.io/otel/exporters/stdout v0.13.0
go.opentelemetry.io/otel/sdk v0.13.0
golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 // indirect
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 // indirect
golang.org/x/text v0.3.5
golang.org/x/tools v0.0.0-20201103235415-b653051172e4
@ -76,9 +75,11 @@ require (
google.golang.org/grpc v1.35.0
google.golang.org/protobuf v1.25.0
gopkg.in/square/go-jose.v2 v2.5.1
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gotest.tools v2.2.0+incompatible
k8s.io/api v0.18.5
k8s.io/apiextensions-apiserver v0.18.5
k8s.io/apimachinery v0.18.5
k8s.io/api v0.19.2
k8s.io/apiextensions-apiserver v0.19.2
k8s.io/apimachinery v0.19.2
k8s.io/client-go v0.19.2
sigs.k8s.io/controller-runtime v0.7.0
)

157
go.sum
View File

@ -6,6 +6,7 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
@ -24,6 +25,7 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@ -42,10 +44,14 @@ github.com/AppsFlyer/go-sundheit v0.2.0 h1:FArqX+HbqZ6U32RC3giEAWRUpkggqxHj91KIv
github.com/AppsFlyer/go-sundheit v0.2.0/go.mod h1:rCRkVTMQo7/krF7xQ9X0XEF1an68viFR6/Gy02q+4ds=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
@ -124,6 +130,7 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI=
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
@ -133,8 +140,8 @@ github.com/caos/logging v0.0.2/go.mod h1:9LKiDE2ChuGv6CHYif/kiugrfEXu9AwDiFWSreX
github.com/caos/oidc v0.6.2/go.mod h1:ozoi3b+aY33gzdvjz4w90VZShIHGsmDa0goruuV0arQ=
github.com/caos/oidc v0.14.3 h1:ItpN396oY/lcIG2dm3rAm8Wm5ZM7kJRJ/BUIXn/tHtI=
github.com/caos/oidc v0.14.3/go.mod h1:fSLPGlxZhjSMP2LYKZ5QMaM/YYmLHfj/Fce+ji48kYY=
github.com/caos/orbos v1.5.14-0.20210302165604-744ecfd88280 h1:0c87LbDKLYZdDKhxcODZ+V4rwcz8Fnt7S4DVJG8bL1U=
github.com/caos/orbos v1.5.14-0.20210302165604-744ecfd88280/go.mod h1:hyjRSGFdmfGHjeiFzL/wyuNKiUPVdkP3eY7+H/JXqAU=
github.com/caos/orbos v1.5.14-0.20210323181340-093f4c8a30f9 h1:y5uUCM2rQUJYwty8kknV9DDyXCPO52zoOUI2UyZrkOg=
github.com/caos/orbos v1.5.14-0.20210323181340-093f4c8a30f9/go.mod h1:pydYnEk6rLwun8ur79Et0Fx7BaIJoLmeDWsNy7m4O0o=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
@ -154,7 +161,9 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cfssl v0.0.0-20190726000631-633726f6bcb7 h1:Puu1hUwfps3+1CUzYdAZXijuvLuRMirgiXdf3zsM2Ig=
github.com/cloudflare/cfssl v0.0.0-20190726000631-633726f6bcb7/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cloudflare/cloudflare-go v0.12.1 h1:j6TfMkFbfoRYqC9wbktl59Nd7xIqPem0XXXvZ9Vtj1I=
github.com/cloudflare/cloudflare-go v0.12.1/go.mod h1:gmzHQPAyHh8N8UgX0Z+3rSMRbNj47JDEbzXDICHVXys=
github.com/cloudscale-ch/cloudscale-go-sdk v1.6.0 h1:qKHn4YlgYKS0oHnOoDvPnL5jDUPpqGRn9RnKUgoPbKY=
github.com/cloudscale-ch/cloudscale-go-sdk v1.6.0/go.mod h1:FhOTOCgKAVvRRMQc1mC0D7xK/3zYnmcZBWFXNkacvMc=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -165,6 +174,7 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@ -223,8 +233,10 @@ github.com/envoyproxy/protoc-gen-validate v0.4.1 h1:7dLaJvASGRD7X49jSCSXXHwKPm0Z
github.com/envoyproxy/protoc-gen-validate v0.4.1/go.mod h1:E+IEazqdaWv3FrnGtZIu3b9fPFMK8AzeTTrk9SfVwWs=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@ -235,8 +247,9 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fxamacker/cbor/v2 v2.2.0 h1:6eXqdDDe588rSYAi1HfZKbx6YYQO4mxQ9eC6xYpU/JQ=
github.com/fxamacker/cbor/v2 v2.2.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@ -264,6 +277,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=
github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@ -312,6 +330,8 @@ github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gG
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM=
github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@ -404,8 +424,9 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3 h1:eHv/jVY/JNop1xg2J9cBb4EzyMpWZoNCP1BslSAIkOI=
github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3/go.mod h1:h/KNeRx7oYU4SpA4SoY7W2/NxDKEEVuwA6j9A27L4OI=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
@ -428,6 +449,7 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@ -440,7 +462,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.2.0 h1:HlJcTiqGHvaWDG7/s85d68Kw7G7FqMz+9LlcyVauOAw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.2.0/go.mod h1:gRq9gZWcIFvz68EgWqy2qQpRbmtn5j2qLZ4zHjqiLpg=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@ -456,6 +480,8 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@ -463,7 +489,6 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
@ -473,6 +498,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/log15 v0.0.0-20200109203555-b30bc20e4fd1 h1:KUDFlmBg2buRWNzIcwLlKvfcnujcHQRQ1As1LoaCLAM=
@ -543,7 +569,6 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@ -576,6 +601,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@ -604,6 +630,7 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q
github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g=
github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@ -635,8 +662,9 @@ github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOq
github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@ -656,6 +684,7 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -678,6 +707,8 @@ github.com/nicksnyder/go-i18n/v2 v2.1.1 h1:ATCOanRDlrfKVB4WHAdJnLEqZtDmKYsweqsOU
github.com/nicksnyder/go-i18n/v2 v2.1.1/go.mod h1:d++QJC9ZVf7pa48qrsRWhMJ5pSHIPmS3OLqK1niyLxs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
@ -686,12 +717,18 @@ github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FW
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
@ -729,7 +766,6 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw=
github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM=
@ -745,7 +781,6 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
@ -755,7 +790,6 @@ github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@ -805,8 +839,9 @@ github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v0.0.7 h1:FfTH+vuMXOas8jmfb5/M7dzEYx7LpcLb7a0LPe34uOU=
github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@ -815,6 +850,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@ -829,6 +866,7 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@ -857,7 +895,9 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@ -890,14 +930,22 @@ go.opentelemetry.io/otel/sdk v0.13.0/go.mod h1:dKvLH8Uu8LcEPlSAUsfW7kMGaJBhk/1NY
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -918,6 +966,7 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -997,6 +1046,7 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
@ -1004,9 +1054,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201026091529-146b70c837a4 h1:awiuzyrRjJDb+OXi9ceHO3SDxVoN3JER57mhtqkdQBs=
golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 h1:42cLlJJdEh+ySyeUUbEQ5bsTiq8voBeTuweGVkY6Puw=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190517181255-950ef44c6e07/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1055,9 +1104,12 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191206220618-eeba5f6aabab/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1076,8 +1128,10 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1120,6 +1174,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -1128,6 +1183,8 @@ golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -1151,6 +1208,8 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200701151220-7cb253f4c4f8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
@ -1168,6 +1227,8 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=
gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
@ -1279,12 +1340,12 @@ gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLF
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@ -1304,14 +1365,16 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo=
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.0.5/go.mod h1:qrD92UurYzNctBMVCJ8C3VQEjffEuphycXtxOudXNCA=
gorm.io/gorm v1.20.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gorm.io/gorm v1.20.6/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -1319,44 +1382,68 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA=
k8s.io/api v0.18.5 h1:fKbCxr+U3fu7k6jB+QeYPD/c6xKYeSJ2KVWmyUypuWM=
k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk=
k8s.io/apiextensions-apiserver v0.18.5 h1:pvbXjB/BRXZiO+/Erp5Pxr+lnhDCv5uxNxHh3FLGZ/g=
k8s.io/apiextensions-apiserver v0.18.5/go.mod h1:woZ7PkEIMHjhHIyApvOwkGOkBLUYKuet0VWVkPTQ/Fs=
k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA=
k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.5 h1:Lh6tgsM9FMkC12K5T5QjRm7rDs6aQN5JHkA0JomULDM=
k8s.io/apimachinery v0.18.5/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apiserver v0.18.5/go.mod h1:+1XgOMq7YJ3OyqPNSJ54EveHwCoBWcJT9CaPycYI5ps=
k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc=
k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA=
k8s.io/cli-runtime v0.18.3/go.mod h1:pqbbi4nqRIQhUWAVzen8uE8DD/zcZLwf+8sQYO4lwLk=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw=
k8s.io/client-go v0.18.5 h1:cLhGZdOmyPhwtt20Lrb7uAqxxB1uvY+NTmNJvno1oKA=
k8s.io/client-go v0.18.5/go.mod h1:EsiD+7Fx+bRckKWZXnAXRKKetm1WuzPagH4iOSC8x58=
k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.18.5/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k=
k8s.io/component-base v0.18.5/go.mod h1:RSbcboNk4B+S8Acs2JaBOVW3XNz1+A637s2jL+QQrlU=
k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs=
k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kubectl v0.18.3/go.mod h1:k/EpvXBDgEsHBzWr0A44l9+ArvYi3txBBnzXBjQasUQ=
k8s.io/metrics v0.18.3/go.mod h1:TkuJE3ezDZ1ym8pYkZoEzJB7HDiFE7qxl+EmExEBoPA=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g=
k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8=
sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU=
sigs.k8s.io/controller-tools v0.4.1 h1:VkuV0MxlRPmRu5iTgBZU4UxUX2LiR99n3sdQGRxZF4w=
sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=

15
hack/boilerplate.go.txt Normal file
View File

@ -0,0 +1,15 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

View File

@ -11,7 +11,7 @@ import (
"gopkg.in/yaml.v3"
)
type AdaptFunc func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (QueryFunc, DestroyFunc, map[string]*secret.Secret, error)
type AdaptFunc func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (QueryFunc, DestroyFunc, map[string]*secret.Secret, map[string]*secret.Existing, bool, error)
type EnsureFunc func(k8sClient kubernetes.ClientInt) error

46
operator/api/core/api.go Normal file
View File

@ -0,0 +1,46 @@
package core
import (
"errors"
"github.com/caos/orbos/pkg/tree"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// UnmarshalUnstructuredSpec extracts the "spec" field of the given
// unstructured Kubernetes object and decodes it into a *tree.Tree.
// It returns an error when the object has no spec or when the spec is not a
// mapping; YAML round-tripping is used because tree.Tree is YAML-based.
func UnmarshalUnstructuredSpec(unstruct *unstructured.Unstructured) (*tree.Tree, error) {
	spec, found := unstruct.Object["spec"]
	if !found {
		return nil, errors.New("no spec in crd")
	}
	specMap, ok := spec.(map[string]interface{})
	if !ok {
		// Fix: this branch previously reported "no spec in crd" as well,
		// hiding the real cause (a spec that is present but not a map).
		return nil, errors.New("spec in crd is not a map")
	}
	data, err := yaml.Marshal(specMap)
	if err != nil {
		return nil, err
	}
	desired := &tree.Tree{}
	// desired is already a pointer; no need to take its address again.
	if err := yaml.Unmarshal(data, desired); err != nil {
		return nil, err
	}
	return desired, nil
}
// MarshalToUnstructuredSpec wraps the YAML representation of t inside the
// "spec" field of a freshly created unstructured Kubernetes object.
func MarshalToUnstructuredSpec(t *tree.Tree) (*unstructured.Unstructured, error) {
	serialized, err := yaml.Marshal(t)
	if err != nil {
		return nil, err
	}
	spec := make(map[string]interface{})
	u := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"spec": spec,
		},
	}
	// The object is returned together with any decode error, mirroring the
	// decoder's result directly.
	return u, yaml.Unmarshal(serialized, spec)
}

View File

@ -0,0 +1,44 @@
package database
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/core"
databasev1 "github.com/caos/zitadel/operator/api/database/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
const (
	// Namespace is the kubernetes namespace the database custom resource lives in.
	Namespace = "caos-system"
	// kind is the CRD kind of the database resource.
	kind = "Database"
	// apiVersion is the group/version the database resource is registered under.
	apiVersion = "caos.ch/v1"
	// Name is the metadata.name of the singleton database resource.
	Name = "database"
)
// ReadCrd fetches the database custom resource from the cluster and returns
// its spec as a *tree.Tree. A missing resource (NotFound) or an unregistered
// CRD kind (NoMatch) is not an error: (nil, nil) is returned instead.
func ReadCrd(k8sClient kubernetes.ClientInt) (*tree.Tree, error) {
	unstruct, err := k8sClient.GetNamespacedCRDResource(databasev1.GroupVersion.Group, databasev1.GroupVersion.Version, kind, Namespace, Name)
	if err == nil {
		return core.UnmarshalUnstructuredSpec(unstruct)
	}
	if macherrs.IsNotFound(err) || meta.IsNoMatchError(err) {
		return nil, nil
	}
	return nil, err
}
// WriteCrd serializes t into the spec of the database custom resource and
// applies it to the cluster.
func WriteCrd(k8sClient kubernetes.ClientInt, t *tree.Tree) error {
	unstruct, err := core.MarshalToUnstructuredSpec(t)
	if err != nil {
		return err
	}
	// Stamp the identifying metadata before applying.
	unstruct.SetAPIVersion(apiVersion)
	unstruct.SetKind(kind)
	unstruct.SetNamespace(Namespace)
	unstruct.SetName(Name)
	return k8sClient.ApplyNamespacedCRDResource(databasev1.GroupVersion.Group, databasev1.GroupVersion.Version, kind, Namespace, Name, unstruct)
}

View File

@ -0,0 +1,57 @@
// +kubebuilder:object:generate=true
// +groupName=caos.ch
package v1
import (
"github.com/caos/orbos/pkg/tree"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is the group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "caos.ch", Version: "v1"}
	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:crd=Database
// Database is the schema for the caos.ch/v1 Database custom resource.
// NOTE(review): "+kubebuilder:crd=Database" is not a standard controller-gen
// marker — confirm it is intentional.
type Database struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the desired state declared by the user.
	Spec Spec `json:"spec,omitempty"`
	// Status holds the observed state (currently empty).
	Status Status `json:"status,omitempty"`
}
// Status describes the observed state of the Database resource. It currently
// carries no fields.
type Status struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
}
// Spec is the spec section of the Database custom resource, mirroring the
// orb database operator's desired-state format.
// NOTE(review): encoding/json has no ",inline" tag option; inlining only
// applies via the yaml tag — confirm JSON round-tripping is not relied upon.
type Spec struct {
	Common *tree.Common `json:",inline" yaml:",inline"`
	Spec   *orbdb.Spec  `json:"spec" yaml:"spec"`
	// Database is a marker field with no payload.
	Database *Empty `json:"database" yaml:"database"`
}
// Empty is a placeholder type used for marker fields that carry no payload.
type Empty struct{}
// +kubebuilder:object:root=true
// DatabaseList contains a list of Database resources.
type DatabaseList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Database `json:"items"`
}
// init registers the Database types with the scheme builder so clients can
// encode and decode them.
func init() {
	SchemeBuilder.Register(&Database{}, &DatabaseList{})
}

View File

@ -0,0 +1,146 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/orb"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
	// Shallow-copy everything first, then deep-copy the fields that own
	// pointers or reference types.
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
func (in *Database) DeepCopy() *Database {
	if in == nil {
		return nil
	}
	out := new(Database)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Database) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Returning a literal nil avoids the non-nil-interface/nil-pointer trap.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatabaseList) DeepCopyInto(out *DatabaseList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// A nil Items slice stays nil; a non-nil one gets a freshly allocated
	// backing array with each element deep-copied.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Database, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList.
func (in *DatabaseList) DeepCopy() *DatabaseList {
	if in == nil {
		return nil
	}
	out := new(DatabaseList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DatabaseList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Empty) DeepCopyInto(out *Empty) {
	// Empty has no fields, so a shallow copy is a complete copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Empty.
func (in *Empty) DeepCopy() *Empty {
	if in == nil {
		return nil
	}
	out := new(Empty)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	// Replace the pointers shared by the shallow copy with owned copies.
	if in.Common != nil {
		in, out := &in.Common, &out.Common
		*out = new(tree.Common)
		**out = **in
	}
	if in.Spec != nil {
		in, out := &in.Spec, &out.Spec
		*out = new(orb.Spec)
		(*in).DeepCopyInto(*out)
	}
	if in.Database != nil {
		in, out := &in.Database, &out.Database
		*out = new(Empty)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
	if in == nil {
		return nil
	}
	out := new(Spec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
	// Status has no fields, so a shallow copy is a complete copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
	if in == nil {
		return nil
	}
	out := new(Status)
	in.DeepCopyInto(out)
	return out
}

View File

@ -0,0 +1,44 @@
package zitadel
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/core"
zitadelv1 "github.com/caos/zitadel/operator/api/zitadel/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
const (
	// Namespace is the kubernetes namespace the zitadel custom resource lives in.
	Namespace = "caos-system"
	// kind is the CRD kind of the zitadel resource.
	kind = "Zitadel"
	// apiVersion is the group/version the zitadel resource is registered under.
	apiVersion = "caos.ch/v1"
	// Name is the metadata.name of the singleton zitadel resource.
	Name = "zitadel"
)
// ReadCrd fetches the zitadel custom resource from the cluster and returns
// its spec as a *tree.Tree. A missing resource (NotFound) or an unregistered
// CRD kind (NoMatch) is not an error: (nil, nil) is returned instead.
func ReadCrd(k8sClient kubernetes.ClientInt) (*tree.Tree, error) {
	unstruct, err := k8sClient.GetNamespacedCRDResource(zitadelv1.GroupVersion.Group, zitadelv1.GroupVersion.Version, kind, Namespace, Name)
	if err == nil {
		return core.UnmarshalUnstructuredSpec(unstruct)
	}
	if macherrs.IsNotFound(err) || meta.IsNoMatchError(err) {
		return nil, nil
	}
	return nil, err
}
// WriteCrd serializes t into the spec of the zitadel custom resource and
// applies it to the cluster.
func WriteCrd(k8sClient kubernetes.ClientInt, t *tree.Tree) error {
	unstruct, err := core.MarshalToUnstructuredSpec(t)
	if err != nil {
		return err
	}
	// Stamp the identifying metadata before applying.
	unstruct.SetAPIVersion(apiVersion)
	unstruct.SetKind(kind)
	unstruct.SetNamespace(Namespace)
	unstruct.SetName(Name)
	return k8sClient.ApplyNamespacedCRDResource(zitadelv1.GroupVersion.Group, zitadelv1.GroupVersion.Version, kind, Namespace, Name, unstruct)
}

View File

@ -0,0 +1,57 @@
// +kubebuilder:object:generate=true
// +groupName=caos.ch
package v1
import (
"github.com/caos/orbos/pkg/tree"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// GroupVersion is the group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "caos.ch", Version: "v1"}
	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:crd=Zitadel
// Zitadel is the schema for the caos.ch/v1 Zitadel custom resource.
// NOTE(review): "+kubebuilder:crd=Zitadel" is not a standard controller-gen
// marker — confirm it is intentional.
type Zitadel struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec holds the desired state declared by the user.
	Spec Spec `json:"spec,omitempty"`
	// Status holds the observed state (currently empty).
	Status Status `json:"status,omitempty"`
}
// Status describes the observed state of the Zitadel resource. It currently
// carries no fields.
type Status struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file
}
// Spec is the spec section of the Zitadel custom resource, mirroring the
// orb zitadel operator's desired-state format.
// NOTE(review): encoding/json has no ",inline" tag option; inlining only
// applies via the yaml tag — confirm JSON round-tripping is not relied upon.
type Spec struct {
	Common *tree.Common `json:",inline" yaml:",inline"`
	Spec   *orbz.Spec   `json:"spec" yaml:"spec"`
	// IAM is a marker field with no payload.
	IAM *Empty `json:"iam" yaml:"iam"`
}
// Empty is a placeholder type used for marker fields that carry no payload.
type Empty struct{}
// +kubebuilder:object:root=true
// ZitadelList contains a list of Zitadel resources.
type ZitadelList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Zitadel `json:"items"`
}
// init registers the Zitadel types with the scheme builder so clients can
// encode and decode them.
func init() {
	SchemeBuilder.Register(&Zitadel{}, &ZitadelList{})
}

View File

@ -0,0 +1,146 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Empty) DeepCopyInto(out *Empty) {
	// Empty has no fields, so a shallow copy is a complete copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Empty.
func (in *Empty) DeepCopy() *Empty {
	if in == nil {
		return nil
	}
	out := new(Empty)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Common and IAM are copied by value dereference (controller-gen emits
// this when it sees no pointer fields inside — regenerate rather than
// hand-edit if those types change); Spec delegates to orb.Spec's own
// deep copy.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.Common != nil {
		in, out := &in.Common, &out.Common
		*out = new(tree.Common)
		**out = **in
	}
	if in.Spec != nil {
		in, out := &in.Spec, &out.Spec
		*out = new(orb.Spec)
		(*in).DeepCopyInto(*out)
	}
	if in.IAM != nil {
		in, out := &in.IAM, &out.IAM
		*out = new(Empty)
		**out = **in
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
// Returns nil for a nil receiver so callers can chain without nil checks.
func (in *Spec) DeepCopy() *Spec {
	if in == nil {
		return nil
	}
	out := new(Spec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Status currently has no fields, so this is a plain struct assignment.
func (in *Status) DeepCopyInto(out *Status) {
	*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
// Returns nil for a nil receiver so callers can chain without nil checks.
func (in *Status) DeepCopy() *Status {
	if in == nil {
		return nil
	}
	out := new(Status)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// TypeMeta and Status are flat value copies; ObjectMeta and Spec carry
// reference types and therefore use their own deep-copy routines.
func (in *Zitadel) DeepCopyInto(out *Zitadel) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zitadel.
// Returns nil for a nil receiver so callers can chain without nil checks.
func (in *Zitadel) DeepCopy() *Zitadel {
	if in == nil {
		return nil
	}
	out := new(Zitadel)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Satisfies runtime.Object so Zitadel can flow through client-go and
// controller-runtime machinery.
func (in *Zitadel) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Items gets a freshly allocated slice with each element deep-copied,
// so the copy shares no backing storage with the original.
func (in *ZitadelList) DeepCopyInto(out *ZitadelList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Zitadel, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelList.
// Returns nil for a nil receiver so callers can chain without nil checks.
func (in *ZitadelList) DeepCopy() *ZitadelList {
	if in == nil {
		return nil
	}
	out := new(ZitadelList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// Satisfies runtime.Object so ZitadelList can flow through client-go
// and controller-runtime machinery.
func (in *ZitadelList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

View File

@ -0,0 +1,74 @@
package crtlcrd
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
databasev1 "github.com/caos/zitadel/operator/api/database/v1"
zitadelv1 "github.com/caos/zitadel/operator/api/zitadel/v1"
"github.com/caos/zitadel/operator/crtlcrd/database"
"github.com/caos/zitadel/operator/crtlcrd/zitadel"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
)
const (
	// Database enables the database-operator reconciler in Start.
	Database = "database"
	// Zitadel enables the zitadel-operator reconciler in Start.
	Zitadel = "zitadel"
)

var (
	// scheme collects every API type the manager's client must understand.
	scheme = runtime.NewScheme()
)

// init registers the built-in client-go types plus both operator CRD
// groups. The AddToScheme errors are deliberately discarded: for these
// static registrations a failure would be a programmer error.
func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	_ = databasev1.AddToScheme(scheme)
	_ = zitadelv1.AddToScheme(scheme)
}
// Start configures and runs the controller-runtime manager for the
// operator's CRD mode. For every requested feature (Database, Zitadel)
// the matching reconciler is wired into the manager; unrecognized
// feature strings are silently ignored. The call blocks until the
// manager stops via the standard signal handler or fails.
//
// monitor is the logging/monitoring sink handed to the reconcilers,
// version the operator version they report, metricsAddr the bind
// address for the metrics endpoint.
func Start(monitor mntr.Monitor, version, metricsAddr string, features ...string) error {
	cfg := ctrl.GetConfigOrDie()
	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme:             scheme,
		MetricsBindAddress: metricsAddr,
		Port:               9443,
		LeaderElection:     false,
		LeaderElectionID:   "9adsd12l.caos.ch",
	})
	if err != nil {
		return errors.Wrap(err, "unable to start manager")
	}

	// One shared client for all reconcilers, built from the same rest
	// config the manager uses.
	k8sClient := kubernetes.NewK8sClientWithConfig(monitor, cfg)

	for _, feature := range features {
		switch feature {
		case Database:
			if err = (&database.Reconciler{
				ClientInt: k8sClient,
				Monitor:   monitor,
				Scheme:    mgr.GetScheme(),
				Version:   version,
			}).SetupWithManager(mgr); err != nil {
				// Name the failing feature so a multi-feature startup
				// failure is unambiguous.
				return errors.Wrapf(err, "unable to create %s controller", Database)
			}
		case Zitadel:
			if err = (&zitadel.Reconciler{
				ClientInt: k8sClient,
				Monitor:   monitor,
				Scheme:    mgr.GetScheme(),
				Version:   version,
			}).SetupWithManager(mgr); err != nil {
				return errors.Wrapf(err, "unable to create %s controller", Zitadel)
			}
		}
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		return errors.Wrap(err, "problem running manager")
	}
	return nil
}

View File

@ -0,0 +1,68 @@
package database
import (
"context"
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/database"
v1 "github.com/caos/zitadel/operator/api/database/v1"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
// Reconciler reconciles the Database custom resource by running the
// database operator's adapt/query/ensure pipeline against the cluster.
type Reconciler struct {
	kubernetes.ClientInt // embedded client used to read the CRD and apply resources
	Monitor              mntr.Monitor
	Scheme               *runtime.Scheme
	Version              string // operator version passed to the adapt funcs
}
// Reconcile implements controller-runtime's Reconciler. The database
// resource is treated as a singleton: requests for anything other than
// database.Name in database.Namespace are rejected. On a valid request
// the desired state is read from the CRD and the orb
// adapt -> query -> ensure pipeline is executed with the "database"
// feature.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
	internalMonitor := r.Monitor.WithFields(map[string]interface{}{
		"kind": "database",
		// NOTE(review): the "namespace" field carries the full
		// namespace/name pair, not just the namespace — confirm the
		// label is intended.
		"namespace": req.NamespacedName,
	})
	// NOTE(review): the deferred call logs whatever err is returned, on
	// top of the internalMonitor.Error calls below — assumed intentional
	// (and that Monitor.Error tolerates nil); verify against mntr.
	defer func() {
		r.Monitor.Error(err)
	}()
	if req.Namespace != database.Namespace || req.Name != database.Name {
		return res, fmt.Errorf("resource must be named %s and namespaced in %s", database.Name, database.Namespace)
	}
	desired, err := database.ReadCrd(r.ClientInt)
	if err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	// Current state is not tracked in CRD mode, hence the empty tree.
	query, _, _, _, _, err := orbdb.AdaptFunc("", &r.Version, false, "database")(internalMonitor, desired, &tree.Tree{})
	if err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	ensure, err := query(r.ClientInt, map[string]interface{}{})
	if err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	if err := ensure(r.ClientInt); err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	return res, nil
}
// SetupWithManager registers this reconciler with mgr, watching
// Database custom resources.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1.Database{}).
		Complete(r)
}

View File

@ -0,0 +1,67 @@
package zitadel
import (
"context"
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/zitadel"
v1 "github.com/caos/zitadel/operator/api/zitadel/v1"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
// Reconciler reconciles the Zitadel custom resource by running the
// zitadel operator's adapt/query/ensure pipeline against the cluster.
type Reconciler struct {
	kubernetes.ClientInt // embedded client used to read the CRD and apply resources
	Monitor              mntr.Monitor
	Scheme               *runtime.Scheme
	Version              string // operator version passed to the adapt funcs
}
// Reconcile implements controller-runtime's Reconciler. The zitadel
// resource is treated as a singleton: requests for anything other than
// zitadel.Name in zitadel.Namespace are rejected. On a valid request
// the desired state is read from the CRD and the orb
// adapt -> query -> ensure pipeline is executed with the "operator" and
// "iam" features.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
	internalMonitor := r.Monitor.WithFields(map[string]interface{}{
		"kind":      "zitadel",
		"namespace": req.NamespacedName,
	})
	// Log whatever error is ultimately returned (Monitor.Error is
	// assumed to tolerate nil).
	defer func() {
		r.Monitor.Error(err)
	}()
	if req.Namespace != zitadel.Namespace || req.Name != zitadel.Name {
		return res, fmt.Errorf("resource must be named %s and namespaced in %s", zitadel.Name, zitadel.Namespace)
	}
	desired, err := zitadel.ReadCrd(r.ClientInt)
	if err != nil {
		// Also log on the field-scoped monitor, consistent with the
		// database reconciler and every other error path below.
		internalMonitor.Error(err)
		return res, err
	}
	// Current state is not tracked in CRD mode, hence the empty tree.
	query, _, _, _, _, err := orbz.AdaptFunc(nil, "ensure", &r.Version, false, []string{"operator", "iam"})(internalMonitor, desired, &tree.Tree{})
	if err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	ensure, err := query(r.ClientInt, map[string]interface{}{})
	if err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	if err := ensure(r.ClientInt); err != nil {
		internalMonitor.Error(err)
		return res, err
	}
	return res, nil
}
// SetupWithManager registers this reconciler with mgr, watching
// Zitadel custom resources.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1.Zitadel{}).
		Complete(r)
}

View File

@ -1,11 +1,12 @@
package start
package crtlgitops
import (
"context"
"time"
"github.com/caos/zitadel/operator/database"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"github.com/caos/zitadel/operator/zitadel"
"time"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
@ -16,7 +17,7 @@ import (
kubernetes2 "github.com/caos/zitadel/pkg/kubernetes"
)
func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string) error {
func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string, gitops bool) error {
takeoffChan := make(chan struct{})
go func() {
takeoffChan <- struct{}{}
@ -35,7 +36,7 @@ func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.
return err
}
takeoff := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbConfig, "ensure", version, []string{"operator", "iam"}), k8sClient)
takeoff := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbConfig, "ensure", version, gitops, []string{"operator", "iam"}), k8sClient)
go func() {
started := time.Now()
@ -53,7 +54,15 @@ func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.
return nil
}
func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb, k8sClient *kubernetes.Client, backup string, version *string) error {
func Restore(
monitor mntr.Monitor,
gitClient *git.Client,
orbCfg *orbconfig.Orb,
k8sClient *kubernetes.Client,
backup string,
gitops bool,
version *string,
) error {
databasesList := []string{
"notification",
"adminapi",
@ -67,7 +76,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return err
}
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaledown", version, []string{"scaledown"}), k8sClient)(); err != nil {
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaledown", version, gitops, []string{"scaledown"}), k8sClient)(); err != nil {
return err
}
@ -75,7 +84,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return err
}
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "migration", version, []string{"migration"}), k8sClient)(); err != nil {
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "migration", version, gitops, []string{"migration"}), k8sClient)(); err != nil {
return err
}
@ -89,7 +98,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return err
}
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaleup", version, []string{"scaleup"}), k8sClient)(); err != nil {
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaleup", version, gitops, []string{"scaleup"}), k8sClient)(); err != nil {
return err
}
@ -100,7 +109,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return nil
}
func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, binaryVersion *string) error {
func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, binaryVersion *string, gitops bool) error {
takeoffChan := make(chan struct{})
go func() {
takeoffChan <- struct{}{}
@ -119,7 +128,7 @@ func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.
return err
}
takeoff := database.Takeoff(monitor, gitClient, orbdb.AdaptFunc("", binaryVersion, "database", "backup"), k8sClient)
takeoff := database.Takeoff(monitor, gitClient, orbdb.AdaptFunc("", binaryVersion, gitops, "operator", "database", "backup"), k8sClient)
go func() {
started := time.Now()
@ -150,6 +159,6 @@ func Backup(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Cl
return err
}
database.Takeoff(monitor, gitClient, orbdb.AdaptFunc(backup, binaryVersion, "instantbackup"), k8sClient)()
database.Takeoff(monitor, gitClient, orbdb.AdaptFunc(backup, binaryVersion, false, "instantbackup"), k8sClient)()
return nil
}

View File

@ -28,6 +28,8 @@ func GetQueryAndDestroyFuncs(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
switch desiredTree.Common.Kind {
@ -50,7 +52,7 @@ func GetQueryAndDestroyFuncs(
features,
)(monitor, desiredTree, currentTree)
default:
return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
}
}

View File

@ -2,6 +2,7 @@ package bucket
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/helper"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/secret"
"github.com/caos/orbos/pkg/labels"
@ -32,28 +33,36 @@ func AdaptFunc(
version string,
features []string,
) operator.AdaptFunc {
return func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (queryFunc operator.QueryFunc, destroyFunc operator.DestroyFunc, secrets map[string]*secretpkg.Secret, err error) {
return func(
monitor mntr.Monitor,
desired *tree.Tree,
current *tree.Tree,
) (
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secretpkg.Secret,
map[string]*secretpkg.Existing,
bool,
error,
) {
internalMonitor := monitor.WithField("component", "backup")
desiredKind, err := ParseDesiredV0(desired)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
secrets, existing := getSecretsMap(desiredKind)
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
destroyS, err := secret.AdaptFuncToDestroy(namespace, secretName)
if err != nil {
return nil, nil, nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: desiredKind.Spec.ServiceAccountJSON.Value})
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
_, destroyB, err := backup.AdaptFunc(
@ -74,7 +83,7 @@ func AdaptFunc(
version,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
_, destroyR, err := restore.AdaptFunc(
@ -93,7 +102,7 @@ func AdaptFunc(
version,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
_, destroyC, err := clean.AdaptFunc(
@ -110,7 +119,7 @@ func AdaptFunc(
version,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
destroyers := make([]operator.DestroyFunc, 0)
@ -133,6 +142,11 @@ func AdaptFunc(
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
if err := desiredKind.validateSecrets(); err != nil {
return nil, err
}
currentDB, err := coreDB.ParseQueriedForDatabase(queried)
if err != nil {
return nil, err
@ -143,6 +157,16 @@ func AdaptFunc(
databases = []string{}
}
value, err := helper.GetSecretValue(k8sClient, desiredKind.Spec.ServiceAccountJSON, desiredKind.Spec.ExistingServiceAccountJSON)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: value})
if err != nil {
return nil, err
}
queryB, _, err := backup.AdaptFunc(
internalMonitor,
name,
@ -201,30 +225,53 @@ func AdaptFunc(
}
queriers := make([]operator.QueryFunc, 0)
cleanupQueries := make([]operator.QueryFunc, 0)
if databases != nil && len(databases) != 0 {
for _, feature := range features {
switch feature {
case backup.Normal, backup.Instant:
case backup.Normal:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryB,
)
case backup.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryB,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(backup.GetCleanupFunc(monitor, namespace, name)),
)
case clean.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryC,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(clean.GetCleanupFunc(monitor, namespace, name)),
)
case restore.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryR,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(restore.GetCleanupFunc(monitor, namespace, name)),
)
}
}
}
for _, cleanup := range cleanupQueries {
queriers = append(queriers, cleanup)
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
getSecretsMap(desiredKind),
secrets,
existing,
false,
nil
}
}

View File

@ -1,6 +1,8 @@
package bucket
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@ -13,7 +15,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"testing"
)
func TestBucket_Secrets(t *testing.T) {
@ -60,7 +61,7 @@ func TestBucket_Secrets(t *testing.T) {
"serviceaccountjson": saJson,
}
_, _, secrets, err := AdaptFunc(
_, _, secrets, existing, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@ -78,6 +79,7 @@ func TestBucket_Secrets(t *testing.T) {
assert.NoError(t, err)
for key, value := range allSecrets {
assert.Contains(t, secrets, key)
assert.Contains(t, existing, key)
assert.Equal(t, value, secrets[key].Value)
}
}
@ -131,7 +133,7 @@ func TestBucket_AdaptBackup(t *testing.T) {
SetBackup(client, namespace, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@ -205,7 +207,7 @@ func TestBucket_AdaptInstantBackup(t *testing.T) {
SetInstantBackup(client, namespace, backupName, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@ -280,7 +282,7 @@ func TestBucket_AdaptRestore(t *testing.T) {
SetRestore(client, namespace, backupName, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@ -316,6 +318,15 @@ func TestBucket_AdaptClean(t *testing.T) {
namespace := "testNs"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
@ -344,9 +355,9 @@ func TestBucket_AdaptClean(t *testing.T) {
return nil
}
SetClean(client, namespace, backupName)
SetClean(client, namespace, backupName, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,

View File

@ -1,9 +1,10 @@
package backup
import (
"github.com/caos/zitadel/operator"
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/cronjob"
@ -13,18 +14,18 @@ import (
)
const (
defaultMode int32 = 256
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
backupPath = "/cockroach"
backupNameEnv = "BACKUP_NAME"
cronJobNamePrefix = "backup-"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
timeout time.Duration = 60
Normal = "backup"
Instant = "instantbackup"
defaultMode int32 = 256
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
backupPath = "/cockroach"
backupNameEnv = "BACKUP_NAME"
cronJobNamePrefix = "backup-"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
timeout = 15 * time.Minute
Normal = "backup"
Instant = "instantbackup"
)
func AdaptFunc(
@ -119,7 +120,6 @@ func AdaptFunc(
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobDef.Namespace, jobDef.Name)),
)
}
}

View File

@ -1,6 +1,8 @@
package backup
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@ -10,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"testing"
)
func TestBackup_AdaptInstantBackup1(t *testing.T) {
@ -60,8 +61,6 @@ func TestBackup_AdaptInstantBackup1(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
@ -134,8 +133,6 @@ func TestBackup_AdaptInstantBackup2(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,

View File

@ -7,15 +7,19 @@ import (
"github.com/pkg/errors"
)
func getCleanupFunc(monitor mntr.Monitor, namespace string, name string) operator.EnsureFunc {
func GetCleanupFunc(
monitor mntr.Monitor,
namespace string,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for backup to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, name, timeout); err != nil {
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for backup to be completed"))
return err
}
monitor.Info("backup is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, name); err != nil {
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
monitor.Error(errors.Wrap(err, "error while trying to cleanup backup"))
return err
}

View File

@ -1,12 +1,13 @@
package backup
import (
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Cleanup1(t *testing.T) {
@ -15,12 +16,12 @@ func TestBackup_Cleanup1(t *testing.T) {
name := "test"
namespace := "testNs"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
@ -30,11 +31,11 @@ func TestBackup_Cleanup2(t *testing.T) {
name := "test2"
namespace := "testNs2"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@ -1,9 +1,10 @@
package clean
import (
"github.com/caos/zitadel/operator"
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
@ -12,16 +13,16 @@ import (
)
const (
Instant = "clean"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
jobPrefix = "backup-"
jobSuffix = "-clean"
timeout time.Duration = 60
Instant = "clean"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
jobPrefix = "backup-"
jobSuffix = "-clean"
timeout = 60 * time.Second
)
func AdaptFunc(
@ -71,7 +72,6 @@ func AdaptFunc(
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobDef.Namespace, jobDef.Name)),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {

View File

@ -1,6 +1,8 @@
package clean
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@ -10,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"testing"
)
func TestBackup_Adapt1(t *testing.T) {
@ -49,8 +50,6 @@ func TestBackup_Adapt1(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
@ -109,8 +108,6 @@ func TestBackup_Adapt2(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,

View File

@ -7,19 +7,19 @@ import (
"github.com/pkg/errors"
)
func getCleanupFunc(
func GetCleanupFunc(
monitor mntr.Monitor,
namespace string,
jobName string,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for clean to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, jobName, 60); err != nil {
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for clean to be completed"))
return err
}
monitor.Info("clean is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, jobName); err != nil {
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
monitor.Error(errors.Wrap(err, "error while trying to cleanup clean"))
return err
}

View File

@ -15,12 +15,12 @@ func TestBackup_Cleanup1(t *testing.T) {
name := "test"
namespace := "testNs"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
@ -30,11 +30,11 @@ func TestBackup_Cleanup2(t *testing.T) {
name := "test2"
namespace := "testNs2"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@ -1,7 +1,9 @@
package bucket
import (
secret2 "github.com/caos/orbos/pkg/secret"
"fmt"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/pkg/errors"
)
@ -12,14 +14,15 @@ type DesiredV0 struct {
}
type Spec struct {
Verbose bool
Cron string `yaml:"cron,omitempty"`
Bucket string `yaml:"bucket,omitempty"`
ServiceAccountJSON *secret2.Secret `yaml:"serviceAccountJSON,omitempty"`
Verbose bool
Cron string `yaml:"cron,omitempty"`
Bucket string `yaml:"bucket,omitempty"`
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ExistingServiceAccountJSON *secret.Existing `yaml:"existingServiceAccountJSON,omitempty"`
}
func (s *Spec) IsZero() bool {
if (s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) &&
if ((s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) && (s.ExistingServiceAccountJSON == nil || s.ExistingServiceAccountJSON.IsZero())) &&
!s.Verbose &&
s.Cron == "" &&
s.Bucket == "" {
@ -40,3 +43,10 @@ func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
return desiredKind, nil
}
func (d *DesiredV0) validateSecrets() error {
if err := secret.ValidateSecret(d.Spec.ServiceAccountJSON, d.Spec.ExistingServiceAccountJSON); err != nil {
return fmt.Errorf("validating api key failed: %w", err)
}
return nil
}

View File

@ -65,7 +65,18 @@ func SetClean(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
backupName string,
labels map[string]string,
saJson string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: labels,
},
StringData: map[string]string{secretKey: saJson},
Type: "Opaque",
}).Times(1).Return(nil)
k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().GetJob(namespace, clean.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, clean.GetJobName(backupName)))

View File

@ -1,9 +1,10 @@
package restore
import (
"github.com/caos/zitadel/operator"
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
@ -12,16 +13,16 @@ import (
)
const (
Instant = "restore"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
jobPrefix = "backup-"
jobSuffix = "-restore"
image = "ghcr.io/caos/zitadel-crbackup"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout time.Duration = 60
Instant = "restore"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
jobPrefix = "backup-"
jobSuffix = "-restore"
image = "ghcr.io/caos/zitadel-crbackup"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout = 15 * time.Minute
)
func AdaptFunc(
@ -79,7 +80,6 @@ func AdaptFunc(
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobdef.Namespace, jobdef.Name)),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {

View File

@ -1,6 +1,8 @@
package restore
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@ -10,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"testing"
)
func TestBackup_Adapt1(t *testing.T) {
@ -54,8 +55,6 @@ func TestBackup_Adapt1(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
@ -121,8 +120,6 @@ func TestBackup_Adapt2(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,

View File

@ -7,15 +7,19 @@ import (
"github.com/pkg/errors"
)
func getCleanupFunc(monitor mntr.Monitor, namespace, jobName string) operator.EnsureFunc {
func GetCleanupFunc(
monitor mntr.Monitor,
namespace,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for restore to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, jobName, timeout); err != nil {
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for restore to be completed"))
return err
}
monitor.Info("restore is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, jobName); err != nil {
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
monitor.Error(errors.Wrap(err, "error while trying to cleanup restore"))
return err
}

View File

@ -15,12 +15,12 @@ func TestBackup_Cleanup1(t *testing.T) {
name := "test"
namespace := "testNs"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
@ -30,11 +30,11 @@ func TestBackup_Cleanup2(t *testing.T) {
name := "test2"
namespace := "testNs2"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}

View File

@ -4,8 +4,12 @@ import (
"github.com/caos/orbos/pkg/secret"
)
func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
secrets := make(map[string]*secret.Secret, 0)
func getSecretsMap(desiredKind *DesiredV0) (map[string]*secret.Secret, map[string]*secret.Existing) {
var (
secrets = make(map[string]*secret.Secret, 0)
existing = make(map[string]*secret.Existing, 0)
)
if desiredKind.Spec == nil {
desiredKind.Spec = &Spec{}
}
@ -13,7 +17,14 @@ func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
if desiredKind.Spec.ServiceAccountJSON == nil {
desiredKind.Spec.ServiceAccountJSON = &secret.Secret{}
}
secrets["serviceaccountjson"] = desiredKind.Spec.ServiceAccountJSON
return secrets
if desiredKind.Spec.ExistingServiceAccountJSON == nil {
desiredKind.Spec.ExistingServiceAccountJSON = &secret.Existing{}
}
sakey := "serviceaccountjson"
secrets[sakey] = desiredKind.Spec.ServiceAccountJSON
existing[sakey] = desiredKind.Spec.ExistingServiceAccountJSON
return secrets, existing
}

View File

@ -1,22 +1,26 @@
package bucket
import (
"testing"
"github.com/caos/orbos/pkg/secret"
"github.com/stretchr/testify/assert"
"testing"
)
func TestBucket_getSecretsFull(t *testing.T) {
secrets := getSecretsMap(&desired)
secrets, existing := getSecretsMap(&desired)
assert.Equal(t, desired.Spec.ServiceAccountJSON, secrets["serviceaccountjson"])
assert.Equal(t, desired.Spec.ExistingServiceAccountJSON, existing["serviceaccountjson"])
}
func TestBucket_getSecretsEmpty(t *testing.T) {
secrets := getSecretsMap(&desiredWithoutSecret)
secrets, existing := getSecretsMap(&desiredWithoutSecret)
assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
}
func TestBucket_getSecretsNil(t *testing.T) {
secrets := getSecretsMap(&desiredNil)
secrets, existing := getSecretsMap(&desiredNil)
assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
}

View File

@ -35,6 +35,8 @@ func GetQueryAndDestroyFuncs(
query operator.QueryFunc,
destroy operator.DestroyFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
componentLabels := labels.MustForComponent(apiLabels, component)
@ -46,7 +48,7 @@ func GetQueryAndDestroyFuncs(
case "databases.caos.ch/ProvidedDatabase":
return provided.AdaptFunc()(internalMonitor, desiredTree, currentTree)
default:
return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
}
}

View File

@ -1,10 +1,11 @@
package managed
import (
"github.com/caos/zitadel/operator"
"strconv"
"strings"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
@ -51,6 +52,8 @@ func AdaptFunc(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
@ -62,14 +65,21 @@ func AdaptFunc(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
internalMonitor := monitor.WithField("kind", "cockroachdb")
allSecrets := map[string]*secret.Secret{}
var (
internalMonitor = monitor.WithField("kind", "cockroachdb")
allSecrets = make(map[string]*secret.Secret)
allExisting = make(map[string]*secret.Existing)
migrate bool
)
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
@ -92,15 +102,15 @@ func AdaptFunc(
queryCert, destroyCert, addUser, deleteUser, listUsers, err := certificate.AdaptFunc(internalMonitor, namespace, componentLabels, desiredKind.Spec.ClusterDns, isFeatureDatabase)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
addRoot, err := addUser("root")
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
destroyRoot, err := deleteUser("root")
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
queryRBAC, destroyRBAC, err := rbac.AdaptFunc(internalMonitor, namespace, labels.MustForName(componentLabels, serviceAccountName))
@ -126,7 +136,7 @@ func AdaptFunc(
desiredKind.Spec.Resources,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
queryS, destroyS, err := services.AdaptFunc(
@ -147,12 +157,12 @@ func AdaptFunc(
queryPDB, err := pdb.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, pdbName), cockroachSelector, "1")
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
destroyPDB, err := pdb.AdaptFuncToDestroy(namespace, pdbName)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
currentDB := &Current{
@ -203,7 +213,7 @@ func AdaptFunc(
for backupName, desiredBackup := range desiredKind.Spec.Backups {
currentBackup := &tree.Tree{}
if timestamp == "" || !oneBackup || (timestamp != "" && strings.HasPrefix(timestamp, backupName)) {
queryB, destroyB, secrets, err := backups.GetQueryAndDestroyFuncs(
queryB, destroyB, secrets, existing, migrateB, err := backups.GetQueryAndDestroyFuncs(
internalMonitor,
desiredBackup,
currentBackup,
@ -218,10 +228,12 @@ func AdaptFunc(
features,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
secret.AppendSecrets(backupName, allSecrets, secrets)
migrate = migrate || migrateB
secret.AppendSecrets(backupName, allSecrets, secrets, allExisting, existing)
destroyers = append(destroyers, destroyB)
queriers = append(queriers, queryB)
}
@ -251,6 +263,8 @@ func AdaptFunc(
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
allSecrets,
allExisting,
migrate,
nil
}
}

View File

@ -1,6 +1,9 @@
package managed
import (
"testing"
"time"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
@ -13,8 +16,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"testing"
"time"
)
func getTreeWithDBAndBackup(t *testing.T, masterkey string, saJson string, backupName string) *tree.Tree {
@ -81,9 +82,9 @@ func TestManaged_AdaptBucketBackup(t *testing.T) {
features := []string{backup.Normal}
bucket.SetBackup(k8sClient, namespace, labels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
@ -119,11 +120,11 @@ func TestManaged_AdaptBucketInstantBackup(t *testing.T) {
features := []string{backup.Instant}
bucket.SetInstantBackup(k8sClient, namespace, backupName, labels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
@ -159,12 +160,12 @@ func TestManaged_AdaptBucketCleanAndRestore(t *testing.T) {
features := []string{restore.Instant, clean.Instant}
bucket.SetRestore(k8sClient, namespace, backupName, labels, saJson)
bucket.SetClean(k8sClient, namespace, backupName)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).Times(2)
bucket.SetClean(k8sClient, namespace, backupName, labels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(2)
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}

View File

@ -1,10 +1,11 @@
package managed
import (
"gopkg.in/yaml.v3"
"testing"
"time"
"gopkg.in/yaml.v3"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
@ -114,11 +115,11 @@ func TestManaged_Adapt1(t *testing.T) {
//statefulset
k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
//running for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
//not ready for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
//ready after setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
//client
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@ -132,7 +133,7 @@ func TestManaged_Adapt1(t *testing.T) {
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
@ -226,11 +227,11 @@ func TestManaged_Adapt2(t *testing.T) {
//statefulset
k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
//running for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
//not ready for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
//ready after setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
//client
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@ -244,7 +245,7 @@ func TestManaged_Adapt2(t *testing.T) {
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)

View File

@ -2,12 +2,14 @@ package statefulset
import (
"fmt"
"sort"
"strings"
"time"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/helpers"
"k8s.io/apimachinery/pkg/util/intstr"
"sort"
"strings"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
@ -216,7 +218,7 @@ func AdaptFunc(
wrapedQuery, wrapedDestroy, err := resources.WrapFuncs(internalMonitor, query, destroy)
checkDBRunning := func(k8sClient kubernetes.ClientInt) error {
internalMonitor.Info("waiting for statefulset to be running")
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60); err != nil {
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60*time.Second); err != nil {
internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be running"))
return err
}
@ -226,7 +228,7 @@ func AdaptFunc(
checkDBNotReady := func(k8sClient kubernetes.ClientInt) error {
internalMonitor.Info("checking for statefulset to not be ready")
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1); err != nil {
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1*time.Second); err != nil {
internalMonitor.Info("statefulset is not ready")
return nil
}
@ -253,7 +255,7 @@ func AdaptFunc(
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
internalMonitor.Info("waiting for statefulset to be ready")
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60); err != nil {
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60*time.Second); err != nil {
internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be ready"))
return err
}

View File

@ -17,6 +17,8 @@ func AdaptFunc() func(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
return func(
@ -27,11 +29,13 @@ func AdaptFunc() func(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
@ -53,7 +57,9 @@ func AdaptFunc() func(
}, func(k8sClient kubernetes.ClientInt) error {
return nil
},
map[string]*secret.Secret{},
make(map[string]*secret.Secret),
make(map[string]*secret.Existing),
false,
nil
}
}

View File

@ -9,6 +9,9 @@ import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/orbos/pkg/treelabels"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/clean"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/caos/zitadel/operator/database/kinds/databases"
"github.com/pkg/errors"
)
@ -21,18 +24,29 @@ func OperatorSelector() *labels.Selector {
return labels.OpenOperatorSelector("ZITADEL", "database.caos.ch")
}
func AdaptFunc(timestamp string, binaryVersion *string, features ...string) operator.AdaptFunc {
func AdaptFunc(timestamp string, binaryVersion *string, gitops bool, features ...string) operator.AdaptFunc {
return func(monitor mntr.Monitor, orbDesiredTree *tree.Tree, currentTree *tree.Tree) (queryFunc operator.QueryFunc, destroyFunc operator.DestroyFunc, secrets map[string]*secret.Secret, err error) {
return func(
monitor mntr.Monitor,
orbDesiredTree *tree.Tree,
currentTree *tree.Tree,
) (
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
defer func() {
err = errors.Wrapf(err, "building %s failed", orbDesiredTree.Common.Kind)
}()
orbMonitor := monitor.WithField("kind", "orb")
desiredKind, err := parseDesiredV0(orbDesiredTree)
desiredKind, err := ParseDesiredV0(orbDesiredTree)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, migrate, errors.Wrap(err, "parsing desired state failed")
}
orbDesiredTree.Parsed = desiredKind
currentTree = &tree.Tree{}
@ -43,18 +57,18 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) oper
queryNS, err := namespace.AdaptFuncToEnsure(NamespaceStr)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, migrate, err
}
destroyNS, err := namespace.AdaptFuncToDestroy(NamespaceStr)
/*destroyNS, err := namespace.AdaptFuncToDestroy(NamespaceStr)
if err != nil {
return nil, nil, nil, err
}
}*/
databaseCurrent := &tree.Tree{}
operatorLabels := mustDatabaseOperator(binaryVersion)
queryDB, destroyDB, secrets, err := databases.GetQueryAndDestroyFuncs(
queryDB, destroyDB, secrets, existing, migrate, err := databases.GetQueryAndDestroyFuncs(
orbMonitor,
desiredKind.Database,
databaseCurrent,
@ -66,23 +80,28 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) oper
desiredKind.Spec.Version,
features,
)
if err != nil {
return nil, nil, nil, err
}
queriers := []operator.QueryFunc{
operator.ResourceQueryToZitadelQuery(queryNS),
queryDB,
}
if desiredKind.Spec.SelfReconciling {
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(Reconcile(monitor, orbDesiredTree)),
)
return nil, nil, nil, nil, migrate, err
}
destroyers := []operator.DestroyFunc{
operator.ResourceDestroyToZitadelDestroy(destroyNS),
destroyDB,
destroyers := make([]operator.DestroyFunc, 0)
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "database", backup.Instant, backup.Normal, restore.Instant, clean.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
queryDB,
)
destroyers = append(destroyers,
destroyDB,
)
case "operator":
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
operator.EnsureFuncToQueryFunc(Reconcile(monitor, desiredKind.Spec)),
)
}
}
currentTree.Parsed = &DesiredV0{
@ -105,6 +124,8 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) oper
return operator.DestroyersToDestroyFunc(monitor, destroyers)(k8sClient)
},
secrets,
existing,
migrate,
nil
}
}

View File

@ -9,7 +9,7 @@ import (
func BackupListFunc() func(monitor mntr.Monitor, desiredTree *tree.Tree) (strings []string, err error) {
return func(monitor mntr.Monitor, desiredTree *tree.Tree) (strings []string, err error) {
desiredKind, err := parseDesiredV0(desiredTree)
desiredKind, err := ParseDesiredV0(desiredTree)
if err != nil {
return nil, errors.Wrap(err, "parsing desired state failed")
}

View File

@ -7,21 +7,25 @@ import (
)
type DesiredV0 struct {
Common *tree.Common `yaml:",inline"`
Spec struct {
Verbose bool
NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `yaml:"tolerations,omitempty"`
Version string `yaml:"version,omitempty"`
SelfReconciling bool `yaml:"selfReconciling"`
//Use this registry to pull the ZITADEL operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
Common *tree.Common `json:",inline" yaml:",inline"`
Spec *Spec `json:"spec" yaml:"spec"`
Database *tree.Tree
}
func parseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
// +kubebuilder:object:generate=true

// Spec holds the user-configurable settings of the database operator's
// desired state. Every field carries both a json tag (for the CRD / API
// machinery) and a yaml tag (for the gitops database.yml), matching the
// dual gitops/CRD modes introduced with this change.
type Spec struct {
	// Verbose enables debug-level logging.
	// Fixed tag: was `json:"verbose" json:"verbose"` — a duplicated json
	// key (rejected by go vet's structtag check) with the yaml tag missing.
	Verbose bool `json:"verbose" yaml:"verbose"`
	// NodeSelector constrains which nodes the operator pod may run on.
	NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
	// Tolerations allow scheduling the operator pod onto tainted nodes.
	Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"`
	// Version of the operator image to deploy when self-reconciling.
	Version string `json:"version,omitempty" yaml:"version,omitempty"`
	// SelfReconciling makes the operator deploy/upgrade its own artifacts
	// in-cluster (see Reconcile).
	SelfReconciling bool `json:"selfReconciling" yaml:"selfReconciling"`
	// GitOps selects gitops mode; it is forwarded to
	// EnsureDatabaseArtifacts when self-reconciling.
	GitOps bool `json:"gitOps,omitempty" yaml:"gitOps,omitempty"`
	//Use this registry to pull the Database operator image from
	//@default: ghcr.io
	CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
desiredKind := &DesiredV0{Common: desiredTree.Common}
if err := desiredTree.Original.Decode(desiredKind); err != nil {

View File

@ -10,38 +10,38 @@ import (
"github.com/pkg/errors"
)
func Reconcile(monitor mntr.Monitor, desiredTree *tree.Tree) operator.EnsureFunc {
func Reconcile(
monitor mntr.Monitor,
spec *Spec,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) (err error) {
defer func() {
err = errors.Wrapf(err, "building %s failed", desiredTree.Common.Kind)
}()
recMonitor := monitor.WithField("version", spec.Version)
desiredKind, err := parseDesiredV0(desiredTree)
if err != nil {
return errors.Wrap(err, "parsing desired state failed")
}
desiredTree.Parsed = desiredKind
recMonitor := monitor.WithField("version", desiredKind.Spec.Version)
if desiredKind.Spec.Version == "" {
err := errors.New("No version set in database.yml")
monitor.Error(err)
if spec.Version == "" {
err := errors.New("No version provided for self-reconciling")
recMonitor.Error(err)
return err
}
imageRegistry := desiredKind.Spec.CustomImageRegistry
imageRegistry := spec.CustomImageRegistry
if imageRegistry == "" {
imageRegistry = "ghcr.io"
}
if err := zitadelKubernetes.EnsureDatabaseArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustDatabaseOperator(&desiredKind.Spec.Version)), k8sClient, desiredKind.Spec.Version, desiredKind.Spec.NodeSelector, desiredKind.Spec.Tolerations, imageRegistry); err != nil {
recMonitor.Error(errors.Wrap(err, "Failed to deploy database-operator into k8s-cluster"))
return err
if spec.SelfReconciling {
desiredTree := &tree.Tree{
Common: &tree.Common{
Kind: "databases.caos.ch/Orb",
Version: "v0",
},
}
if err := zitadelKubernetes.EnsureDatabaseArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustDatabaseOperator(&spec.Version)), k8sClient, spec.Version, spec.NodeSelector, spec.Tolerations, imageRegistry, spec.GitOps); err != nil {
recMonitor.Error(errors.Wrap(err, "Failed to deploy database-operator into k8s-cluster"))
return err
}
recMonitor.Info("Applied database-operator")
}
recMonitor.Info("Applied database-operator")
return nil
}

View File

@ -0,0 +1,54 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package orb
import (
"k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE: controller-gen generated code ("DO NOT EDIT") — changes here are
// overwritten on regeneration.
func (in *Spec) DeepCopyInto(out *Spec) {
	// Shallow copy first; the reference-typed fields (map, slice) are then
	// replaced below with deep copies so out shares no storage with in.
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]v1.Toleration, len(*in))
		for i := range *in {
			// Toleration itself contains pointer fields, so delegate to
			// its own generated DeepCopyInto.
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
// A nil receiver yields nil, so callers may chain without a nil check.
// NOTE: controller-gen generated code ("DO NOT EDIT") — changes here are
// overwritten on regeneration.
func (in *Spec) DeepCopy() *Spec {
	if in == nil {
		return nil
	}
	out := new(Spec)
	in.DeepCopyInto(out)
	return out
}

View File

@ -2,6 +2,7 @@ package database
import (
"errors"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
@ -25,7 +26,7 @@ func Takeoff(monitor mntr.Monitor, gitClient *git.Client, adapt operator.AdaptFu
return
}
query, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
query, _, _, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
if err != nil {
internalMonitor.Error(err)
return

View File

@ -2,16 +2,22 @@ package secrets
import (
"errors"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"fmt"
"strings"
"github.com/caos/orbos/pkg/kubernetes"
crddb "github.com/caos/zitadel/operator/api/database"
crdzit "github.com/caos/zitadel/operator/api/zitadel"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
orbzit "github.com/caos/zitadel/operator/zitadel/kinds/orb"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api"
zitadelOrb "github.com/caos/zitadel/operator/zitadel/kinds/orb"
)
const (
@ -19,79 +25,154 @@ const (
database string = "database"
)
func GetAllSecretsFunc(orb *orb.Orb) func(monitor mntr.Monitor, gitClient *git.Client) (map[string]*secret.Secret, map[string]*tree.Tree, error) {
return func(monitor mntr.Monitor, gitClient *git.Client) (map[string]*secret.Secret, map[string]*tree.Tree, error) {
allSecrets := make(map[string]*secret.Secret, 0)
allTrees := make(map[string]*tree.Tree, 0)
foundZitadel, err := api.ExistsZitadelYml(gitClient)
if err != nil {
return nil, nil, err
}
if foundZitadel {
zitadelYML, err := api.ReadZitadelYml(gitClient)
if err != nil {
return nil, nil, err
}
allTrees[zitadel] = zitadelYML
_, _, zitadelSecrets, err := zitadelOrb.AdaptFunc(orb, "secret", nil, []string{})(monitor, zitadelYML, &tree.Tree{})
if err != nil {
return nil, nil, err
}
if zitadelSecrets != nil && len(zitadelSecrets) > 0 {
secret.AppendSecrets(zitadel, allSecrets, zitadelSecrets)
}
} else {
monitor.Info("no file for zitadel found")
}
foundDB, err := api.ExistsDatabaseYml(gitClient)
if err != nil {
return nil, nil, err
}
if foundDB {
dbYML, err := api.ReadDatabaseYml(gitClient)
if err != nil {
return nil, nil, err
}
allTrees[database] = dbYML
_, _, dbSecrets, err := orbdb.AdaptFunc("", nil, "database", "backup")(monitor, dbYML, nil)
if err != nil {
return nil, nil, err
}
if dbSecrets != nil && len(dbSecrets) > 0 {
secret.AppendSecrets(database, allSecrets, dbSecrets)
}
} else {
monitor.Info("no file for database found")
}
return allSecrets, allTrees, nil
func GetAllSecretsFunc(
monitor mntr.Monitor,
printLogs,
gitops bool,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
orb *orb.Orb,
) func() (
map[string]*secret.Secret,
map[string]*secret.Existing,
map[string]*tree.Tree,
error,
) {
return func() (
map[string]*secret.Secret,
map[string]*secret.Existing,
map[string]*tree.Tree,
error,
) {
return getAllSecrets(monitor, printLogs, gitops, orb, gitClient, k8sClient)
}
}
func PushFunc() func(monitor mntr.Monitor, gitClient *git.Client, trees map[string]*tree.Tree, path string) error {
return func(monitor mntr.Monitor, gitClient *git.Client, trees map[string]*tree.Tree, path string) error {
operator := ""
if strings.HasPrefix(path, zitadel) {
operator = zitadel
} else if strings.HasPrefix(path, database) {
operator = database
} else {
return errors.New("Operator unknown")
}
func getAllSecrets(
monitor mntr.Monitor,
printLogs,
gitops bool,
orb *orb.Orb,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
) (
map[string]*secret.Secret,
map[string]*secret.Existing,
map[string]*tree.Tree,
error,
) {
allSecrets := make(map[string]*secret.Secret, 0)
allExisting := make(map[string]*secret.Existing, 0)
allTrees := make(map[string]*tree.Tree, 0)
desired, found := trees[operator]
if !found {
return errors.New("Operator file not found")
}
if err := secret.GetOperatorSecrets(
monitor,
printLogs,
gitops,
allTrees,
allSecrets,
allExisting,
zitadel,
func() (bool, error) { return api.ExistsZitadelYml(gitClient) },
func() (t *tree.Tree, err error) { return api.ReadZitadelYml(gitClient) },
func() (t *tree.Tree, err error) { return crdzit.ReadCrd(k8sClient) },
func(t *tree.Tree) (map[string]*secret.Secret, map[string]*secret.Existing, bool, error) {
_, _, secrets, existing, migrate, err := orbzit.AdaptFunc(orb, "secret", nil, gitops, []string{})(monitor, t, &tree.Tree{})
return secrets, existing, migrate, err
},
); err != nil {
return nil, nil, nil, err
}
if operator == zitadel {
if err := secret.GetOperatorSecrets(
monitor,
printLogs,
gitops,
allTrees,
allSecrets,
allExisting,
database,
func() (bool, error) { return api.ExistsDatabaseYml(gitClient) },
func() (t *tree.Tree, err error) { return api.ReadDatabaseYml(gitClient) },
func() (t *tree.Tree, err error) { return crddb.ReadCrd(k8sClient) },
func(t *tree.Tree) (map[string]*secret.Secret, map[string]*secret.Existing, bool, error) {
_, _, secrets, existing, migrate, err := orbdb.AdaptFunc("", nil, gitops, "database", "backup")(monitor, t, nil)
return secrets, existing, migrate, err
},
); err != nil {
return nil, nil, nil, err
}
if k8sClient == nil {
allExisting = nil
}
if len(allSecrets) == 0 && len(allExisting) == 0 {
return nil, nil, nil, errors.New("couldn't find any secrets")
}
return allSecrets, allExisting, allTrees, nil
}
// PushFunc returns a closure that persists the desired trees for the
// operator addressed by path: via git in gitops mode, or as a CRD in the
// cluster otherwise. The heavy lifting is delegated to push with the
// dependencies captured here.
func PushFunc(
	monitor mntr.Monitor,
	gitops bool,
	gitClient *git.Client,
	k8sClient kubernetes.ClientInt,
) func(
	trees map[string]*tree.Tree,
	path string,
) error {
	// Capture the clients and mode once; the caller only supplies the
	// desired state and the secret path per invocation.
	doPush := func(
		trees map[string]*tree.Tree,
		path string,
	) error {
		return push(monitor, gitops, gitClient, k8sClient, trees, path)
	}
	return doPush
}
func push(
monitor mntr.Monitor,
gitops bool,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
trees map[string]*tree.Tree,
path string,
) error {
var (
pushGitFunc func(*tree.Tree) error
applyCRDFunc func(*tree.Tree) error
operator string
)
if strings.HasPrefix(path, zitadel) {
operator = zitadel
pushGitFunc = func(desired *tree.Tree) error {
return api.PushZitadelDesiredFunc(gitClient, desired)(monitor)
} else if operator == database {
}
applyCRDFunc = func(t *tree.Tree) error {
return crdzit.WriteCrd(k8sClient, t)
}
} else if strings.HasPrefix(path, database) {
operator = database
pushGitFunc = func(desired *tree.Tree) error {
return api.PushDatabaseDesiredFunc(gitClient, desired)(monitor)
}
return errors.New("Operator push function unknown")
applyCRDFunc = func(t *tree.Tree) error {
return crddb.WriteCrd(k8sClient, t)
}
} else {
return errors.New("operator unknown")
}
desired, found := trees[operator]
if !found {
return fmt.Errorf("desired state for %s not found", operator)
}
if gitops {
return pushGitFunc(desired)
}
return applyCRDFunc(desired)
}

View File

@ -3,10 +3,10 @@ package iam
import (
"fmt"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
@ -22,7 +22,8 @@ func GetQueryAndDestroyFuncs(
currentTree *tree.Tree,
nodeselector map[string]string,
tolerations []core.Toleration,
orbconfig *orb.Orb,
dbClient database.Client,
namespace string,
action string,
version *string,
features []string,
@ -30,6 +31,8 @@ func GetQueryAndDestroyFuncs(
query operator.QueryFunc,
destroy operator.DestroyFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
@ -42,8 +45,8 @@ func GetQueryAndDestroyFuncs(
switch desiredTree.Common.Kind {
case "zitadel.caos.ch/ZITADEL":
apiLabels := labels.MustForAPI(operatorLabels, "ZITADEL", desiredTree.Common.Version)
return zitadel.AdaptFunc(apiLabels, nodeselector, tolerations, orbconfig, action, version, features)(monitor, desiredTree, currentTree)
return zitadel.AdaptFunc(apiLabels, nodeselector, tolerations, dbClient, namespace, action, version, features)(monitor, desiredTree, currentTree)
default:
return nil, nil, nil, errors.Errorf("unknown iam kind %s", desiredTree.Common.Kind)
return nil, nil, nil, nil, false, errors.Errorf("unknown iam kind %s", desiredTree.Common.Kind)
}
}

View File

@ -4,7 +4,6 @@ import (
"strconv"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/setup"
@ -13,7 +12,6 @@ import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/namespace"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/ambassador"
@ -28,7 +26,8 @@ func AdaptFunc(
apiLabels *labels.API,
nodeselector map[string]string,
tolerations []core.Toleration,
orbconfig *orb.Orb,
dbClient database.Client,
namespace string,
action string,
version *string,
features []string,
@ -41,24 +40,25 @@ func AdaptFunc(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
allSecrets := make(map[string]*secret.Secret)
internalMonitor := monitor.WithField("kind", "iam")
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, allSecrets, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
secret.AppendSecrets("", allSecrets, getSecretsMap(desiredKind))
allSecrets, allExisting := getSecretsMap(desiredKind)
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
namespaceStr := "caos-zitadel"
// shared elements
cmName := "zitadel-vars"
secretName := "zitadel-secret"
@ -70,28 +70,12 @@ func AdaptFunc(
secretPath := "/secret"
//services which are kubernetes resources and are used in the ambassador elements
grpcServiceName := "grpc-v1"
var grpcPort uint16 = 80
grpcPort := 80
httpServiceName := "http-v1"
var httpPort uint16 = 80
httpPort := 80
uiServiceName := "ui-v1"
var uiPort uint16 = 80
// labels := getLabels()
users := getAllUsers(desiredKind)
allZitadelUsers := getZitadelUserList()
dbClient, err := database.NewClient(monitor, orbconfig.URL, orbconfig.Repokey)
if err != nil {
return nil, nil, allSecrets, err
}
queryNS, err := namespace.AdaptFuncToEnsure(namespaceStr)
if err != nil {
return nil, nil, allSecrets, err
}
destroyNS, err := namespace.AdaptFuncToDestroy(namespaceStr)
if err != nil {
return nil, nil, allSecrets, err
}
uiPort := 80
usersWithoutPWs := getUserListWithoutPasswords(desiredKind)
zitadelComponent := labels.MustForComponent(apiLabels, "ZITADEL")
zitadelDeploymentName := labels.MustForName(zitadelComponent, "zitadel")
@ -100,21 +84,21 @@ func AdaptFunc(
internalMonitor,
zitadelComponent,
zitadelPodSelector,
namespaceStr,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryC, destroyC, getConfigurationHashes, err := configuration.AdaptFunc(
getQueryC, destroyC, getConfigurationHashes, err := configuration.AdaptFunc(
internalMonitor,
zitadelComponent,
namespaceStr,
namespace,
desiredKind.Spec.Configuration,
cmName,
certPath,
@ -123,12 +107,11 @@ func AdaptFunc(
consoleCMName,
secretVarsName,
secretPasswordName,
users,
services.GetClientIDFunc(namespaceStr, httpServiceName, httpPort),
dbClient,
services.GetClientIDFunc(namespace, httpServiceName, httpPort),
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryDB, err := database.AdaptFunc(
@ -136,28 +119,28 @@ func AdaptFunc(
dbClient,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryM, destroyM, err := migration.AdaptFunc(
internalMonitor,
labels.MustForComponent(apiLabels, "database"),
namespaceStr,
namespace,
action,
secretPasswordName,
migrationUser,
allZitadelUsers,
usersWithoutPWs,
nodeselector,
tolerations,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
querySetup, destroySetup, err := setup.AdaptFunc(
getQuerySetup, destroySetup, err := setup.AdaptFunc(
internalMonitor,
zitadelComponent,
namespaceStr,
namespace,
action,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
@ -170,11 +153,10 @@ func AdaptFunc(
consoleCMName,
secretVarsName,
secretPasswordName,
allZitadelUsers,
migration.GetDoneFunc(monitor, namespaceStr, action),
configuration.GetReadyFunc(monitor, namespaceStr, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName),
getConfigurationHashes,
)
if err != nil {
return nil, nil, nil, nil, false, err
}
queryD, destroyD, err := deployment.AdaptFunc(
internalMonitor,
@ -182,7 +164,7 @@ func AdaptFunc(
zitadelPodSelector,
desiredKind.Spec.Force,
version,
namespaceStr,
namespace,
desiredKind.Spec.ReplicaCount,
desiredKind.Spec.Affinity,
cmName,
@ -192,64 +174,38 @@ func AdaptFunc(
consoleCMName,
secretVarsName,
secretPasswordName,
allZitadelUsers,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
desiredKind.Spec.Resources,
migration.GetDoneFunc(monitor, namespaceStr, action),
configuration.GetReadyFunc(monitor, namespaceStr, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName),
setup.GetDoneFunc(monitor, namespaceStr, action),
getConfigurationHashes,
migration.GetDoneFunc(monitor, namespace, action),
configuration.GetReadyFunc(monitor, namespace, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName),
setup.GetDoneFunc(monitor, namespace, action),
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryAmbassador, destroyAmbassador, err := ambassador.AdaptFunc(
internalMonitor,
labels.MustForComponent(apiLabels, "apiGateway"),
namespaceStr,
grpcServiceName+"."+namespaceStr+":"+strconv.Itoa(int(grpcPort)),
"http://"+httpServiceName+"."+namespaceStr+":"+strconv.Itoa(int(httpPort)),
"http://"+uiServiceName+"."+namespaceStr,
namespace,
grpcServiceName+"."+namespace+":"+strconv.Itoa(grpcPort),
"http://"+httpServiceName+"."+namespace+":"+strconv.Itoa(httpPort),
"http://"+uiServiceName+"."+namespace,
desiredKind.Spec.Configuration.DNS,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
destroyers := make([]operator.DestroyFunc, 0)
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "migration":
queriers = append(queriers,
queryDB,
//configuration
queryC,
//migration
queryM,
//wait until migration is completed
operator.EnsureFuncToQueryFunc(migration.GetDoneFunc(monitor, namespaceStr, action)),
)
destroyers = append(destroyers,
destroyM,
)
case "iam":
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
queryDB,
//configuration
queryC,
//migration
queryM,
//services
queryS,
querySetup,
queryD,
operator.EnsureFuncToQueryFunc(deployment.GetReadyFunc(monitor, namespaceStr, zitadelDeploymentName)),
queryAmbassador,
)
destroyers = append(destroyers,
destroyAmbassador,
destroyS,
@ -257,24 +213,85 @@ func AdaptFunc(
destroyD,
destroySetup,
destroyC,
operator.ResourceDestroyToZitadelDestroy(destroyNS),
)
case "scaledown":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespaceStr, zitadelDeploymentName)(0)),
)
case "scaleup":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespaceStr, zitadelDeploymentName)(desiredKind.Spec.ReplicaCount)),
)
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
users, err := getAllUsers(k8sClient, desiredKind)
if err != nil {
return nil, err
}
allZitadelUsers, err := getZitadelUserList(k8sClient, desiredKind)
if err != nil {
return nil, err
}
queryReadyM := operator.EnsureFuncToQueryFunc(migration.GetDoneFunc(monitor, namespace, action))
queryC := getQueryC(users)
queryReadyC := operator.EnsureFuncToQueryFunc(configuration.GetReadyFunc(monitor, namespace, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName))
querySetup := getQuerySetup(allZitadelUsers, getConfigurationHashes)
queryReadySetup := operator.EnsureFuncToQueryFunc(setup.GetDoneFunc(monitor, namespace, action))
queryD := queryD(allZitadelUsers, getConfigurationHashes)
queryReadyD := operator.EnsureFuncToQueryFunc(deployment.GetReadyFunc(monitor, namespace, zitadelDeploymentName))
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "migration":
queriers = append(queriers,
queryDB,
//configuration
queryC,
queryReadyC,
//migration
queryM,
queryReadyM,
operator.EnsureFuncToQueryFunc(migration.GetCleanupFunc(monitor, namespace, action)),
)
case "iam":
queriers = append(queriers,
queryDB,
//configuration
queryC,
queryReadyC,
//migration
queryM,
queryReadyM,
//services
queryS,
//setup
querySetup,
queryReadySetup,
//deployment
queryD,
queryReadyD,
//handle change if necessary for clientID
queryC,
queryReadyC,
//again apply deployment if config changed
queryD,
queryReadyD,
//apply ambassador crds after zitadel is ready
queryAmbassador,
)
case "scaledown":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespace, zitadelDeploymentName)(0)),
)
case "scaleup":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespace, zitadelDeploymentName)(desiredKind.Spec.ReplicaCount)),
)
}
}
return operator.QueriersToEnsureFunc(internalMonitor, true, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(monitor, destroyers),
allSecrets,
allExisting,
false,
nil
}
}

View File

@ -21,9 +21,9 @@ type ConsoleEnv struct {
}
const (
googleServiceAccountJSONPath = "google-serviceaccount-key.json"
zitadelKeysPath = "zitadel-keys.yaml"
timeout time.Duration = 60
googleServiceAccountJSONPath = "google-serviceaccount-key.json"
zitadelKeysPath = "zitadel-keys.yaml"
timeout = 60 * time.Second
)
func AdaptFunc(
@ -38,20 +38,21 @@ func AdaptFunc(
consoleCMName string,
secretVarsName string,
secretPasswordName string,
necessaryUsers map[string]string,
dbClient database.Client,
getClientID func() string,
dbClient database.ClientInt,
) (
operator.QueryFunc,
func(
necessaryUsers map[string]string,
) operator.QueryFunc,
operator.DestroyFunc,
func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string,
func(
k8sClient kubernetes.ClientInt,
queried map[string]interface{},
necessaryUsers map[string]string,
) (map[string]string, error),
error,
) {
internalMonitor := monitor.WithField("component", "configuration")
literalsSecret := literalsSecret(desired, googleServiceAccountJSONPath, zitadelKeysPath)
literalsSecretVars := literalsSecretVars(desired)
destroyCM, err := configmap.AdaptFuncToDestroy(namespace, cmName)
if err != nil {
return nil, nil, nil, err
@ -73,7 +74,7 @@ func AdaptFunc(
return nil, nil, nil, err
}
_, destroyUser, err := users.AdaptFunc(internalMonitor, necessaryUsers, dbClient)
_, destroyUser, err := users.AdaptFunc(internalMonitor, dbClient)
if err != nil {
return nil, nil, nil, err
}
@ -87,71 +88,97 @@ func AdaptFunc(
operator.ResourceDestroyToZitadelDestroy(destroySP),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queryUser, _, err := users.AdaptFunc(internalMonitor, necessaryUsers, dbClient)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), literalsSecret)
if err != nil {
return nil, err
}
querySV, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretVarsName), literalsSecretVars)
if err != nil {
return nil, err
}
querySP, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretPasswordName), necessaryUsers)
if err != nil {
return nil, err
}
return func(
necessaryUsers map[string]string,
) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
literalsSecret, err := literalsSecret(k8sClient, desired, googleServiceAccountJSONPath, zitadelKeysPath)
if err != nil {
return nil, err
}
literalsSecretVars, err := literalsSecretVars(k8sClient, desired)
if err != nil {
return nil, err
}
queryCCM, err := configmap.AdaptFuncToEnsure(
namespace,
consoleCMName,
labels.MustForNameK8SMap(componentLabels, consoleCMName),
literalsConsoleCM(
getClientID(),
desired.DNS,
k8sClient,
queryUser, _, err := users.AdaptFunc(internalMonitor, dbClient)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), literalsSecret)
if err != nil {
return nil, err
}
querySV, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretVarsName), literalsSecretVars)
if err != nil {
return nil, err
}
querySP, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretPasswordName), necessaryUsers)
if err != nil {
return nil, err
}
queryCCM, err := configmap.AdaptFuncToEnsure(
namespace,
consoleCMName,
),
)
if err != nil {
return nil, err
}
labels.MustForNameK8SMap(componentLabels, consoleCMName),
literalsConsoleCM(
getClientID(),
desired.DNS,
k8sClient,
namespace,
consoleCMName,
),
)
if err != nil {
return nil, err
}
queryCM, err := configmap.AdaptFuncToEnsure(
namespace,
cmName,
labels.MustForNameK8SMap(componentLabels, cmName),
literalsConfigMap(
desired,
necessaryUsers,
certPath,
secretPath,
googleServiceAccountJSONPath,
zitadelKeysPath,
queried,
),
)
if err != nil {
return nil, err
}
queryCM, err := configmap.AdaptFuncToEnsure(
namespace,
cmName,
labels.MustForNameK8SMap(componentLabels, cmName),
literalsConfigMap(
desired,
necessaryUsers,
certPath,
secretPath,
googleServiceAccountJSONPath,
zitadelKeysPath,
queried,
),
)
if err != nil {
return nil, err
}
queriers := []operator.QueryFunc{
queryUser,
operator.ResourceQueryToZitadelQuery(queryS),
operator.ResourceQueryToZitadelQuery(queryCCM),
operator.ResourceQueryToZitadelQuery(querySV),
operator.ResourceQueryToZitadelQuery(querySP),
operator.ResourceQueryToZitadelQuery(queryCM),
}
queriers := []operator.QueryFunc{
queryUser(necessaryUsers),
operator.ResourceQueryToZitadelQuery(queryS),
operator.ResourceQueryToZitadelQuery(queryCCM),
operator.ResourceQueryToZitadelQuery(querySV),
operator.ResourceQueryToZitadelQuery(querySP),
operator.ResourceQueryToZitadelQuery(queryCM),
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string {
func(
k8sClient kubernetes.ClientInt,
queried map[string]interface{},
necessaryUsers map[string]string,
) (map[string]string, error) {
literalsSecret, err := literalsSecret(k8sClient, desired, googleServiceAccountJSONPath, zitadelKeysPath)
if err != nil {
return nil, err
}
literalsSecretVars, err := literalsSecretVars(k8sClient, desired)
if err != nil {
return nil, err
}
return map[string]string{
secretName: getHash(literalsSecret),
secretVarsName: getHash(literalsSecretVars),
@ -176,7 +203,7 @@ func AdaptFunc(
consoleCMName,
),
),
}
}, nil
},
nil
}

View File

@ -38,6 +38,7 @@ func SetConfigMap(
}
func SetSecretVars(
t *testing.T,
k8sClient *kubernetesmock.MockClientInt,
namespace string,
secretVarsName string,
@ -45,6 +46,8 @@ func SetSecretVars(
desired *Configuration,
) {
literalsSV, err := literalsSecretVars(k8sClient, desired)
assert.NoError(t, err)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@ -52,7 +55,7 @@ func SetSecretVars(
Labels: labels,
},
Type: "Opaque",
StringData: literalsSecretVars(desired),
StringData: literalsSV,
}).Times(1)
}
func SetConsoleCM(
@ -76,12 +79,16 @@ func SetConsoleCM(
k8sClient.EXPECT().ApplyConfigmap(consoleCM).Times(1)
}
func SetSecrets(
t *testing.T,
k8sClient *kubernetesmock.MockClientInt,
namespace string,
secretName string,
labels map[string]string,
desired *Configuration,
) {
literalsS, err := literalsSecret(k8sClient, desired, googleServiceAccountJSONPath, zitadelKeysPath)
assert.NoError(t, err)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@ -89,7 +96,7 @@ func SetSecrets(
Labels: labels,
},
Type: "Opaque",
StringData: literalsSecret(desired, googleServiceAccountJSONPath, zitadelKeysPath),
StringData: literalsS,
}).Times(1)
}
@ -114,7 +121,7 @@ func SetSecretPasswords(
func TestConfiguration_Adapt(t *testing.T) {
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{Fields: map[string]interface{}{"component": "configuration"}}
namespace := "test"
@ -162,6 +169,7 @@ func TestConfiguration_Adapt(t *testing.T) {
zitadelKeysPath)
SetSecretVars(
t,
k8sClient,
namespace,
secretVarsName,
@ -179,6 +187,7 @@ func TestConfiguration_Adapt(t *testing.T) {
)
SetSecrets(
t,
k8sClient,
namespace,
secretName,
@ -194,7 +203,7 @@ func TestConfiguration_Adapt(t *testing.T) {
users,
)
query, _, _, err := AdaptFunc(
getQuery, _, _, err := AdaptFunc(
monitor,
componentLabels,
namespace,
@ -206,12 +215,11 @@ func TestConfiguration_Adapt(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordName,
users,
getClientID,
dbClient,
getClientID,
)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(k8sClient))
@ -220,7 +228,7 @@ func TestConfiguration_Adapt(t *testing.T) {
func TestConfiguration_AdaptFull(t *testing.T) {
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{Fields: map[string]interface{}{"component": "configuration"}}
namespace := "test2"
@ -268,6 +276,7 @@ func TestConfiguration_AdaptFull(t *testing.T) {
zitadelKeysPath)
SetSecretVars(
t,
k8sClient,
namespace,
secretVarsName,
@ -285,6 +294,7 @@ func TestConfiguration_AdaptFull(t *testing.T) {
)
SetSecrets(
t,
k8sClient,
namespace,
secretName,
@ -300,7 +310,7 @@ func TestConfiguration_AdaptFull(t *testing.T) {
users,
)
query, _, _, err := AdaptFunc(
getQuery, _, _, err := AdaptFunc(
monitor,
componentLabels,
namespace,
@ -312,12 +322,12 @@ func TestConfiguration_AdaptFull(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordName,
users,
getClientID,
dbClient,
getClientID,
)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(k8sClient))

View File

@ -28,53 +28,67 @@ type Subdomains struct {
Issuer string `yaml:"issuer"`
}
type Passwords struct {
Migration *secret.Secret `yaml:"migration"`
Management *secret.Secret `yaml:"management"`
Auth *secret.Secret `yaml:"auth"`
Authz *secret.Secret `yaml:"authz"`
Adminapi *secret.Secret `yaml:"adminapi"`
Notification *secret.Secret `yaml:"notification"`
Eventstore *secret.Secret `yaml:"eventstore"`
Queries *secret.Secret `yaml:"queries"`
Migration *secret.Secret `yaml:"migration"`
Management *secret.Secret `yaml:"management"`
Auth *secret.Secret `yaml:"auth"`
Authz *secret.Secret `yaml:"authz"`
Adminapi *secret.Secret `yaml:"adminapi"`
Notification *secret.Secret `yaml:"notification"`
Eventstore *secret.Secret `yaml:"eventstore"`
Queries *secret.Secret `yaml:"queries"`
ExistingMigration *secret.Existing `yaml:"existingMigration"`
ExistingManagement *secret.Existing `yaml:"existingManagement"`
ExistingAuth *secret.Existing `yaml:"existingAuth"`
ExistingAuthz *secret.Existing `yaml:"existingAuthz"`
ExistingAdminapi *secret.Existing `yaml:"existingAdminapi"`
ExistingNotification *secret.Existing `yaml:"existingNotification"`
ExistingEventstore *secret.Existing `yaml:"existingEventstore"`
ExistingQueries *secret.Existing `yaml:"existingQueries"`
}
type Secrets struct {
Keys *secret.Secret `yaml:"keys,omitempty"`
UserVerificationID string `yaml:"userVerificationID,omitempty"`
OTPVerificationID string `yaml:"otpVerificationID,omitempty"`
OIDCKeysID string `yaml:"oidcKeysID,omitempty"`
CookieID string `yaml:"cookieID,omitempty"`
CSRFID string `yaml:"csrfID,omitempty"`
DomainVerificationID string `yaml:"domainVerificationID,omitempty"`
IDPConfigVerificationID string `yaml:"idpConfigVerificationID,omitempty"`
Keys *secret.Secret `yaml:"keys,omitempty"`
ExistingKeys *secret.Existing `yaml:"existingKeys,omitempty"`
UserVerificationID string `yaml:"userVerificationID,omitempty"`
OTPVerificationID string `yaml:"otpVerificationID,omitempty"`
OIDCKeysID string `yaml:"oidcKeysID,omitempty"`
CookieID string `yaml:"cookieID,omitempty"`
CSRFID string `yaml:"csrfID,omitempty"`
DomainVerificationID string `yaml:"domainVerificationID,omitempty"`
IDPConfigVerificationID string `yaml:"idpConfigVerificationID,omitempty"`
}
type Notifications struct {
GoogleChatURL *secret.Secret `yaml:"googleChatURL,omitempty"`
Email *Email `yaml:"email,omitempty"`
Twilio *Twilio `yaml:"twilio,omitempty"`
GoogleChatURL *secret.Secret `yaml:"googleChatURL,omitempty"`
ExistingGoogleChatURL *secret.Existing `yaml:"existingGoogleChatURL,omitempty"`
Email *Email `yaml:"email,omitempty"`
Twilio *Twilio `yaml:"twilio,omitempty"`
}
type Tracing struct {
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ProjectID string `yaml:"projectID,omitempty"`
Fraction string `yaml:"fraction,omitempty"`
Type string `yaml:"type,omitempty"`
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ExistingServiceAccountJSON *secret.Existing `yaml:"existingServiceAccountJSON,omitempty"`
ProjectID string `yaml:"projectID,omitempty"`
Fraction string `yaml:"fraction,omitempty"`
Type string `yaml:"type,omitempty"`
}
type Twilio struct {
SenderName string `yaml:"senderName,omitempty"`
AuthToken *secret.Secret `yaml:"authToken,omitempty"`
SID *secret.Secret `yaml:"sid,omitempty"`
SenderName string `yaml:"senderName,omitempty"`
AuthToken *secret.Secret `yaml:"authToken,omitempty"`
SID *secret.Secret `yaml:"sid,omitempty"`
ExistingAuthToken *secret.Existing `yaml:"existingAuthToken,omitempty"`
ExistingSID *secret.Existing `yaml:"ExistingSid,omitempty"`
}
type Email struct {
SMTPHost string `yaml:"smtpHost,omitempty"`
SMTPUser string `yaml:"smtpUser,omitempty"`
SenderAddress string `yaml:"senderAddress,omitempty"`
SenderName string `yaml:"senderName,omitempty"`
TLS bool `yaml:"tls,omitempty"`
AppKey *secret.Secret `yaml:"appKey,omitempty"`
SMTPHost string `yaml:"smtpHost,omitempty"`
SMTPUser string `yaml:"smtpUser,omitempty"`
SenderAddress string `yaml:"senderAddress,omitempty"`
SenderName string `yaml:"senderName,omitempty"`
TLS bool `yaml:"tls,omitempty"`
AppKey *secret.Secret `yaml:"appKey,omitempty"`
ExistingAppKey *secret.Existing `yaml:"existingAppKey,omitempty"`
}
type Cache struct {

View File

@ -5,8 +5,8 @@ import (
"strconv"
"strings"
"github.com/caos/orbos/pkg/helper"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
)
@ -109,38 +109,62 @@ func literalsConfigMap(
return literalsConfigMap
}
func literalsSecret(desired *Configuration, googleServiceAccountJSONPath, zitadelKeysPath string) map[string]string {
func literalsSecret(k8sClient kubernetes.ClientInt, desired *Configuration, googleServiceAccountJSONPath, zitadelKeysPath string) (map[string]string, error) {
literalsSecret := map[string]string{}
if desired != nil {
if desired.Tracing != nil && desired.Tracing.ServiceAccountJSON != nil {
literalsSecret[googleServiceAccountJSONPath] = desired.Tracing.ServiceAccountJSON.Value
if desired.Tracing != nil && (desired.Tracing.ServiceAccountJSON != nil || desired.Tracing.ExistingServiceAccountJSON != nil) {
value, err := helper.GetSecretValue(k8sClient, desired.Tracing.ServiceAccountJSON, desired.Tracing.ExistingServiceAccountJSON)
if err != nil {
return nil, err
}
literalsSecret[googleServiceAccountJSONPath] = value
}
if desired.Secrets != nil && desired.Secrets.Keys != nil {
literalsSecret[zitadelKeysPath] = desired.Secrets.Keys.Value
if desired.Secrets != nil && (desired.Secrets.Keys != nil || desired.Secrets.ExistingKeys != nil) {
value, err := helper.GetSecretValue(k8sClient, desired.Secrets.Keys, desired.Secrets.ExistingKeys)
if err != nil {
return nil, err
}
literalsSecret[zitadelKeysPath] = value
}
}
return literalsSecret
return literalsSecret, nil
}
func literalsSecretVars(desired *Configuration) map[string]string {
func literalsSecretVars(k8sClient kubernetes.ClientInt, desired *Configuration) (map[string]string, error) {
literalsSecretVars := map[string]string{}
if desired != nil {
if desired.Notifications != nil {
if desired.Notifications.Email.AppKey != nil {
literalsSecretVars["ZITADEL_EMAILAPPKEY"] = desired.Notifications.Email.AppKey.Value
if desired.Notifications.Email.AppKey != nil || desired.Notifications.Email.ExistingAppKey != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.Email.AppKey, desired.Notifications.Email.ExistingAppKey)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_EMAILAPPKEY"] = value
}
if desired.Notifications.GoogleChatURL != nil {
literalsSecretVars["ZITADEL_GOOGLE_CHAT_URL"] = desired.Notifications.GoogleChatURL.Value
if desired.Notifications.GoogleChatURL != nil || desired.Notifications.ExistingGoogleChatURL != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.GoogleChatURL, desired.Notifications.ExistingGoogleChatURL)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_GOOGLE_CHAT_URL"] = value
}
if desired.Notifications.Twilio.AuthToken != nil {
literalsSecretVars["ZITADEL_TWILIO_AUTH_TOKEN"] = desired.Notifications.Twilio.AuthToken.Value
if desired.Notifications.Twilio.AuthToken != nil || desired.Notifications.Twilio.ExistingAuthToken != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.Twilio.AuthToken, desired.Notifications.Twilio.ExistingAuthToken)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_TWILIO_AUTH_TOKEN"] = value
}
if desired.Notifications.Twilio.SID != nil {
literalsSecretVars["ZITADEL_TWILIO_SID"] = desired.Notifications.Twilio.SID.Value
if desired.Notifications.Twilio.SID != nil || desired.Notifications.Twilio.ExistingSID != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.Twilio.SID, desired.Notifications.Twilio.ExistingSID)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_TWILIO_SID"] = value
}
}
}
return literalsSecretVars
return literalsSecretVars, nil
}
func literalsConsoleCM(

View File

@ -127,6 +127,62 @@ var (
},
ClusterDNS: "cluster",
}
// desiredFullExisting is a test fixture: a fully populated Configuration in
// which every secret is supplied as a reference to an already-existing
// kubernetes secret (secret.Existing) instead of an inline literal value.
// It exercises the "Existing*" code paths of literalsSecret / literalsSecretVars.
// NOTE(review): secret.Existing is constructed with three identical unkeyed
// strings — presumably Name/Key plus one more field; tests below only read
// .Name and .Key. Confirm the field order against the secret package.
desiredFullExisting = &Configuration{
Tracing: &Tracing{
ExistingServiceAccountJSON: &secret.Existing{"sajson", "sajson", "sajson"},
ProjectID: "projectid",
Fraction: "fraction",
Type: "type",
},
Secrets: &Secrets{
ExistingKeys: &secret.Existing{"keys", "keys", "keys"},
UserVerificationID: "userid",
OTPVerificationID: "otpid",
OIDCKeysID: "oidcid",
CookieID: "cookieid",
CSRFID: "csrfid",
DomainVerificationID: "domainid",
IDPConfigVerificationID: "idpid",
},
Notifications: &Notifications{
ExistingGoogleChatURL: &secret.Existing{"chat", "chat", "chat"},
Email: &Email{
SMTPHost: "smtphost",
SMTPUser: "smtpuser",
SenderAddress: "sender",
SenderName: "sendername",
TLS: true,
ExistingAppKey: &secret.Existing{"appkey", "appkey", "appkey"},
},
Twilio: &Twilio{
SenderName: "sendername",
// NOTE(review): the auth-token secret is named "migration" while all
// other Existing references reuse their own value as the name —
// looks like a copy-paste from Passwords.ExistingMigration; verify.
ExistingAuthToken: &secret.Existing{"migration", "migration", "migration"},
ExistingSID: &secret.Existing{"sid", "sid", "sid"},
},
},
Passwords: &Passwords{
ExistingMigration: &secret.Existing{"migration", "migration", "migration"},
ExistingEventstore: &secret.Existing{"eventstore", "eventstore", "eventstore"},
ExistingNotification: &secret.Existing{"notification", "notification", "notification"},
ExistingAuthz: &secret.Existing{"authz", "authz", "authz"},
ExistingAuth: &secret.Existing{"auth", "auth", "auth"},
ExistingAdminapi: &secret.Existing{"adminapi", "adminapi", "adminapi"},
ExistingManagement: &secret.Existing{"management", "management", "management"},
},
DebugMode: true,
LogLevel: "debug",
DNS: &DNS{
Domain: "domain",
TlsSecret: "tls",
Subdomains: &Subdomains{
Accounts: "accounts",
API: "api",
Console: "console",
Issuer: "issuer",
},
},
ClusterDNS: "cluster",
}
)
func TestConfiguration_LiteralsConfigMap(t *testing.T) {
@ -285,6 +341,7 @@ func TestConfiguration_LiteralsConfigMapFull(t *testing.T) {
}
func TestConfiguration_LiteralsSecrets(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
googleSA := "sajson"
zitadelKeyPath := "zitadel"
@ -293,11 +350,13 @@ func TestConfiguration_LiteralsSecrets(t *testing.T) {
zitadelKeyPath: "",
}
literals := literalsSecret(desiredEmpty, googleSA, zitadelKeyPath)
literals, err := literalsSecret(client, desiredEmpty, googleSA, zitadelKeyPath)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
func TestConfiguration_LiteralsSecretsFull(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
googleSA := "sajson"
zitadelKeyPath := "zitadel"
@ -306,31 +365,123 @@ func TestConfiguration_LiteralsSecretsFull(t *testing.T) {
zitadelKeyPath: "keys",
}
literals := literalsSecret(desiredFull, googleSA, zitadelKeyPath)
literals, err := literalsSecret(client, desiredFull, googleSA, zitadelKeyPath)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
// TestConfiguration_LiteralsSecretsExisting verifies that literalsSecret
// resolves secret values through the kubernetes client when the desired
// configuration references existing secrets rather than inline values.
func TestConfiguration_LiteralsSecretsExisting(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	const (
		ns          = "caos-system"
		saJSON      = "sajson"
		keysContent = "keys"
	)

	// secretWith builds the fake secret payload the mocked client returns,
	// mirroring both the StringData and Data representations.
	secretWith := func(key, value string) *corev1.Secret {
		return &corev1.Secret{
			StringData: map[string]string{key: value},
			Data:       map[string][]byte{key: []byte(value)},
		}
	}

	client.EXPECT().
		GetSecret(ns, desiredFullExisting.Tracing.ExistingServiceAccountJSON.Name).
		Return(secretWith(desiredFullExisting.Tracing.ExistingServiceAccountJSON.Key, saJSON), nil)
	client.EXPECT().
		GetSecret(ns, desiredFullExisting.Secrets.ExistingKeys.Name).
		Return(secretWith(desiredFullExisting.Secrets.ExistingKeys.Key, keysContent), nil)

	googleSA := "sajson"
	zitadelKeyPath := "zitadel"
	want := map[string]string{
		googleSA:       saJSON,
		zitadelKeyPath: keysContent,
	}

	got, err := literalsSecret(client, desiredFullExisting, googleSA, zitadelKeyPath)
	assert.NoError(t, err)
	assert.EqualValues(t, want, got)
}
// TestConfiguration_LiteralsSecretVars checks that an empty desired
// configuration yields all notification secret vars set to empty strings.
func TestConfiguration_LiteralsSecretVars(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	equals := map[string]string{
		"ZITADEL_EMAILAPPKEY":       "",
		"ZITADEL_GOOGLE_CHAT_URL":   "",
		"ZITADEL_TWILIO_AUTH_TOKEN": "",
		"ZITADEL_TWILIO_SID":        "",
	}

	// Fixed: a stale pre-refactor call (`literals := literalsSecretVars(desiredEmpty)`)
	// was left in place; it used the old single-argument signature and
	// redeclared `literals`, which does not compile.
	literals, err := literalsSecretVars(client, desiredEmpty)
	assert.NoError(t, err)
	assert.EqualValues(t, equals, literals)
}
// TestConfiguration_LiteralsSecretVarsFull checks that a fully populated
// desired configuration with inline secret values maps each value to its
// corresponding ZITADEL_* environment literal.
func TestConfiguration_LiteralsSecretVarsFull(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	equals := map[string]string{
		"ZITADEL_EMAILAPPKEY":       "appkey",
		"ZITADEL_GOOGLE_CHAT_URL":   "chat",
		"ZITADEL_TWILIO_AUTH_TOKEN": "authtoken",
		"ZITADEL_TWILIO_SID":        "sid",
	}

	// Fixed: a stale pre-refactor call (`literals := literalsSecretVars(desiredFull)`)
	// was left in place; it used the old single-argument signature and
	// redeclared `literals`, which does not compile.
	literals, err := literalsSecretVars(client, desiredFull)
	assert.NoError(t, err)
	assert.EqualValues(t, equals, literals)
}
// TestConfiguration_LiteralsSecretVarsExisting verifies that
// literalsSecretVars resolves each notification secret through the
// kubernetes client when the desired configuration only carries
// references to existing secrets.
//
// Fixed two defects:
//   - the GetSecret mock expectations were commented out behind a
//     "TODO: incomment!!!" marker, so the mocked client had no
//     expectations registered;
//   - the function under test was called with desiredFull (inline
//     values) instead of desiredFullExisting, so the Existing* code
//     paths were never exercised by this test.
func TestConfiguration_LiteralsSecretVarsExisting(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	// Namespace the configuration helper reads secrets from (mirrors the
	// sibling TestConfiguration_LiteralsSecretsExisting test).
	namespace := "caos-system"
	appkey := "appkey"
	chat := "chat"
	authtoken := "authtoken"
	sid := "sid"

	client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.Email.ExistingAppKey.Name).Return(&corev1.Secret{
		StringData: map[string]string{
			desiredFullExisting.Notifications.Email.ExistingAppKey.Key: appkey,
		},
		Data: map[string][]byte{
			desiredFullExisting.Notifications.Email.ExistingAppKey.Key: []byte(appkey),
		},
	}, nil)
	client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.ExistingGoogleChatURL.Name).Return(&corev1.Secret{
		StringData: map[string]string{
			desiredFullExisting.Notifications.ExistingGoogleChatURL.Key: chat,
		},
		Data: map[string][]byte{
			desiredFullExisting.Notifications.ExistingGoogleChatURL.Key: []byte(chat),
		},
	}, nil)
	client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.Twilio.ExistingAuthToken.Name).Return(&corev1.Secret{
		StringData: map[string]string{
			desiredFullExisting.Notifications.Twilio.ExistingAuthToken.Key: authtoken,
		},
		Data: map[string][]byte{
			desiredFullExisting.Notifications.Twilio.ExistingAuthToken.Key: []byte(authtoken),
		},
	}, nil)
	client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.Twilio.ExistingSID.Name).Return(&corev1.Secret{
		StringData: map[string]string{
			desiredFullExisting.Notifications.Twilio.ExistingSID.Key: sid,
		},
		Data: map[string][]byte{
			desiredFullExisting.Notifications.Twilio.ExistingSID.Key: []byte(sid),
		},
	}, nil)

	equals := map[string]string{
		"ZITADEL_EMAILAPPKEY":       appkey,
		"ZITADEL_GOOGLE_CHAT_URL":   chat,
		"ZITADEL_TWILIO_AUTH_TOKEN": authtoken,
		"ZITADEL_TWILIO_SID":        sid,
	}

	literals, err := literalsSecretVars(client, desiredFullExisting)
	assert.NoError(t, err)
	assert.EqualValues(t, equals, literals)
}

View File

@ -9,14 +9,14 @@ import (
func AdaptFunc(
monitor mntr.Monitor,
users map[string]string,
dbClient database.ClientInt,
dbClient database.Client,
) (
operator.QueryFunc,
func(users map[string]string) operator.QueryFunc,
operator.DestroyFunc,
error,
) {
internalMonitor := monitor.WithField("component", "db-users")
destroyers := make([]operator.DestroyFunc, 0)
destroyers = append(destroyers, func(k8sClient kubernetes.ClientInt) error {
@ -32,35 +32,39 @@ func AdaptFunc(
return nil
})
usernames := []string{}
for username := range users {
usernames = append(usernames, username)
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queriers := make([]operator.QueryFunc, 0)
db, err := database.GetDatabaseInQueried(queried)
if err != nil {
return nil, err
}
for _, username := range usernames {
ensure := createIfNecessary(monitor, username, db.Users, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
return func(users map[string]string) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queriers := make([]operator.QueryFunc, 0)
db, err := database.GetDatabaseInQueried(queried)
if err != nil {
return nil, err
}
}
for _, listedUser := range db.Users {
ensure := deleteIfNotRequired(monitor, listedUser, usernames, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
}
}
if queriers == nil || len(queriers) == 0 {
return func(k8sClient kubernetes.ClientInt) error { return nil }, nil
usernames := []string{}
for username := range users {
usernames = append(usernames, username)
}
for _, username := range usernames {
ensure := createIfNecessary(monitor, username, db.Users, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
}
}
for _, listedUser := range db.Users {
ensure := deleteIfNotRequired(monitor, listedUser, usernames, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
}
}
if queriers == nil || len(queriers) == 0 {
return func(k8sClient kubernetes.ClientInt) error { return nil }, nil
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}, operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
nil
}

View File

@ -1,19 +1,20 @@
package users
import (
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
databasemock "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestUsers_Adapt_CreateFirst(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@ -24,8 +25,9 @@ func TestUsers_Adapt_CreateFirst(t *testing.T) {
})
dbClient.EXPECT().AddUser(monitor, "test", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
assert.NoError(t, err)
err = ensure(client)
@ -35,7 +37,7 @@ func TestUsers_Adapt_CreateFirst(t *testing.T) {
func TestUsers_Adapt_DoNothing(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@ -45,8 +47,9 @@ func TestUsers_Adapt_DoNothing(t *testing.T) {
Users: []string{"test"},
})
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
@ -56,7 +59,7 @@ func TestUsers_Adapt_DoNothing(t *testing.T) {
func TestUsers_Adapt_Add(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw", "test2": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@ -67,8 +70,9 @@ func TestUsers_Adapt_Add(t *testing.T) {
})
dbClient.EXPECT().AddUser(monitor, "test2", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
assert.NoError(t, err)
err = ensure(client)
@ -78,7 +82,7 @@ func TestUsers_Adapt_Add(t *testing.T) {
func TestUsers_Adapt_Delete(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw", "test2": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@ -90,8 +94,9 @@ func TestUsers_Adapt_Delete(t *testing.T) {
dbClient.EXPECT().DeleteUser(monitor, "test3", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
err = ensure(client)
assert.NoError(t, err)
@ -100,7 +105,7 @@ func TestUsers_Adapt_Delete(t *testing.T) {
func TestUsers_Adapt_DeleteMultiple(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@ -114,8 +119,9 @@ func TestUsers_Adapt_DeleteMultiple(t *testing.T) {
dbClient.EXPECT().DeleteUser(monitor, "test2", client)
dbClient.EXPECT().DeleteUser(monitor, "test3", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
err = ensure(client)
assert.NoError(t, err)

View File

@ -7,7 +7,7 @@ import (
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
)
func createIfNecessary(monitor mntr.Monitor, user string, list []string, dbClient database.ClientInt) operator.EnsureFunc {
func createIfNecessary(monitor mntr.Monitor, user string, list []string, dbClient database.Client) operator.EnsureFunc {
existing := false
for _, listedUser := range list {
if listedUser == user {
@ -23,7 +23,7 @@ func createIfNecessary(monitor mntr.Monitor, user string, list []string, dbClien
return nil
}
func deleteIfNotRequired(monitor mntr.Monitor, listedUser string, list []string, dbClient database.ClientInt) operator.EnsureFunc {
func deleteIfNotRequired(monitor mntr.Monitor, listedUser string, list []string, dbClient database.Client) operator.EnsureFunc {
required := false
for _, user := range list {
if user == listedUser {

View File

@ -1,19 +1,20 @@
package users
import (
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
databasemock "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestUsers_CreateIfNecessary(t *testing.T) {
users := []string{}
monitor := mntr.Monitor{}
user := "test"
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient.EXPECT().AddUser(monitor, user, k8sClient)
@ -38,7 +39,7 @@ func TestUsers_DeleteIfNotRequired(t *testing.T) {
users := []string{}
monitor := mntr.Monitor{}
user := "test"
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient.EXPECT().DeleteUser(monitor, user, k8sClient)

View File

@ -8,7 +8,7 @@ import (
func AdaptFunc(
monitor mntr.Monitor,
dbClient ClientInt,
dbClient Client,
) (
operator.QueryFunc,
error,

View File

@ -2,16 +2,17 @@ package database
import (
"errors"
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
databasemock "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestDatabase_Adapt(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
host := "host"
port := "port"
@ -41,7 +42,7 @@ func TestDatabase_Adapt(t *testing.T) {
}
func TestDatabase_Adapt2(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
host := "host2"
port := "port2"
@ -71,7 +72,7 @@ func TestDatabase_Adapt2(t *testing.T) {
}
func TestDatabase_AdaptFailConnection(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
@ -95,7 +96,7 @@ func TestDatabase_AdaptFailConnection(t *testing.T) {
}
func TestDatabase_AdaptFailUsers(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
host := "host"
port := "port"

View File

@ -9,27 +9,28 @@ import (
"github.com/caos/zitadel/pkg/databases"
)
var _ ClientInt = (*Client)(nil)
var _ Client = (*GitOpsClient)(nil)
var _ Client = (*CrdClient)(nil)
type ClientInt interface {
type Client interface {
GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error)
DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error
AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error
ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error)
}
type Client struct {
type GitOpsClient struct {
Monitor mntr.Monitor
gitClient *git.Client
}
func NewClient(monitor mntr.Monitor, repoURL, repoKey string) (*Client, error) {
func NewGitOpsClient(monitor mntr.Monitor, repoURL, repoKey string) (*GitOpsClient, error) {
gitClient, err := newGit(monitor, repoURL, repoKey)
if err != nil {
return nil, err
}
return &Client{
return &GitOpsClient{
Monitor: monitor,
gitClient: gitClient,
}, nil
@ -47,10 +48,27 @@ func newGit(monitor mntr.Monitor, repoURL string, repoKey string) (*git.Client,
return gitClient, nil
}
func (c *Client) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
return databases.GetConnectionInfo(
func (c *GitOpsClient) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
return databases.GitOpsGetConnectionInfo(
monitor,
k8sClient,
c.gitClient,
)
}
type CrdClient struct {
Monitor mntr.Monitor
}
func NewCrdClient(monitor mntr.Monitor) *CrdClient {
return &CrdClient{
Monitor: monitor,
}
}
func (c *CrdClient) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
return databases.CrdGetConnectionInfo(
monitor,
k8sClient,
)
}

View File

@ -11,31 +11,31 @@ import (
reflect "reflect"
)
// MockClientInt is a mock of ClientInt interface
type MockClientInt struct {
// MockClient is a mock of Client interface
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientIntMockRecorder
recorder *MockClientMockRecorder
}
// MockClientIntMockRecorder is the mock recorder for MockClientInt
type MockClientIntMockRecorder struct {
mock *MockClientInt
// MockClientMockRecorder is the mock recorder for MockClient
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClientInt creates a new mock instance
func NewMockClientInt(ctrl *gomock.Controller) *MockClientInt {
mock := &MockClientInt{ctrl: ctrl}
mock.recorder = &MockClientIntMockRecorder{mock}
// NewMockClient creates a new mock instance
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockClientInt) EXPECT() *MockClientIntMockRecorder {
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// GetConnectionInfo mocks base method
func (m *MockClientInt) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
func (m *MockClient) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetConnectionInfo", monitor, k8sClient)
ret0, _ := ret[0].(string)
@ -45,13 +45,13 @@ func (m *MockClientInt) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubern
}
// GetConnectionInfo indicates an expected call of GetConnectionInfo
func (mr *MockClientIntMockRecorder) GetConnectionInfo(monitor, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) GetConnectionInfo(monitor, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionInfo", reflect.TypeOf((*MockClientInt)(nil).GetConnectionInfo), monitor, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionInfo", reflect.TypeOf((*MockClient)(nil).GetConnectionInfo), monitor, k8sClient)
}
// DeleteUser mocks base method
func (m *MockClientInt) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
func (m *MockClient) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteUser", monitor, user, k8sClient)
ret0, _ := ret[0].(error)
@ -59,13 +59,13 @@ func (m *MockClientInt) DeleteUser(monitor mntr.Monitor, user string, k8sClient
}
// DeleteUser indicates an expected call of DeleteUser
func (mr *MockClientIntMockRecorder) DeleteUser(monitor, user, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) DeleteUser(monitor, user, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockClientInt)(nil).DeleteUser), monitor, user, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockClient)(nil).DeleteUser), monitor, user, k8sClient)
}
// AddUser mocks base method
func (m *MockClientInt) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
func (m *MockClient) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddUser", monitor, user, k8sClient)
ret0, _ := ret[0].(error)
@ -73,13 +73,13 @@ func (m *MockClientInt) AddUser(monitor mntr.Monitor, user string, k8sClient kub
}
// AddUser indicates an expected call of AddUser
func (mr *MockClientIntMockRecorder) AddUser(monitor, user, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) AddUser(monitor, user, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUser", reflect.TypeOf((*MockClientInt)(nil).AddUser), monitor, user, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUser", reflect.TypeOf((*MockClient)(nil).AddUser), monitor, user, k8sClient)
}
// ListUsers mocks base method
func (m *MockClientInt) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
func (m *MockClient) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListUsers", monitor, k8sClient)
ret0, _ := ret[0].([]string)
@ -88,7 +88,7 @@ func (m *MockClientInt) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.Cli
}
// ListUsers indicates an expected call of ListUsers
func (mr *MockClientIntMockRecorder) ListUsers(monitor, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) ListUsers(monitor, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockClientInt)(nil).ListUsers), monitor, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockClient)(nil).ListUsers), monitor, k8sClient)
}

View File

@ -6,8 +6,8 @@ import (
"github.com/caos/zitadel/pkg/databases"
)
func (c *Client) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.DeleteUser(
func (c *GitOpsClient) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.GitOpsDeleteUser(
monitor,
user,
k8sClient,
@ -15,8 +15,8 @@ func (c *Client) DeleteUser(monitor mntr.Monitor, user string, k8sClient kuberne
)
}
func (c *Client) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.AddUser(
func (c *GitOpsClient) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.GitOpsAddUser(
monitor,
user,
k8sClient,
@ -24,10 +24,33 @@ func (c *Client) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes
)
}
func (c *Client) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
return databases.ListUsers(
func (c *GitOpsClient) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
return databases.GitOpsListUsers(
monitor,
k8sClient,
c.gitClient,
)
}
func (c *CrdClient) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.CrdDeleteUser(
monitor,
user,
k8sClient,
)
}
func (c *CrdClient) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.CrdAddUser(
monitor,
user,
k8sClient,
)
}
func (c *CrdClient) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
return databases.CrdListUsers(
monitor,
k8sClient,
)
}

View File

@ -21,8 +21,8 @@ const (
containerName = "zitadel"
RunAsUser = int64(1000)
//zitadelImage can be found in github.com/caos/zitadel repo
zitadelImage = "ghcr.io/caos/zitadel"
timeout time.Duration = 60
zitadelImage = "ghcr.io/caos/zitadel"
timeout = 60 * time.Second
)
func AdaptFunc(
@ -41,16 +41,17 @@ func AdaptFunc(
consoleCMName string,
secretVarsName string,
secretPasswordsName string,
users []string,
nodeSelector map[string]string,
tolerations []corev1.Toleration,
resources *k8s.Resources,
migrationDone operator.EnsureFunc,
configurationDone operator.EnsureFunc,
setupDone operator.EnsureFunc,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string,
) (
operator.QueryFunc,
func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc,
operator.DestroyFunc,
error,
) {
@ -64,52 +65,65 @@ func AdaptFunc(
operator.ResourceDestroyToZitadelDestroy(destroy),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
deploymentDef := deploymentDef(
nameLabels,
namespace,
replicaCount,
podSelector,
nodeSelector,
tolerations,
affinity,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
)
hashes := getConfigurationHashes(k8sClient, queried)
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
deploymentDef.Annotations[k] = v
deploymentDef.Spec.Template.Annotations[k] = v
return func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
users := make([]string, 0)
for user := range necessaryUsers {
users = append(users, user)
}
}
query, err := deployment.AdaptFuncToEnsure(deploymentDef, force)
if err != nil {
return nil, err
}
deploymentDef := deploymentDef(
nameLabels,
namespace,
replicaCount,
podSelector,
nodeSelector,
tolerations,
affinity,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
)
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(migrationDone),
operator.EnsureFuncToQueryFunc(configurationDone),
operator.EnsureFuncToQueryFunc(setupDone),
operator.ResourceQueryToZitadelQuery(query),
}
hashes, err := getConfigurationHashes(k8sClient, queried, necessaryUsers)
if err != nil {
return nil, err
}
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
deploymentDef.Annotations[k] = v
deploymentDef.Spec.Template.Annotations[k] = v
}
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
query, err := deployment.AdaptFuncToEnsure(deploymentDef, force)
if err != nil {
return nil, err
}
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(migrationDone),
operator.EnsureFuncToQueryFunc(configurationDone),
operator.EnsureFuncToQueryFunc(setupDone),
operator.ResourceQueryToZitadelQuery(query),
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
nil
}
func deploymentDef(nameLabels *labels.Name, namespace string, replicaCount int, podSelector *labels.Selector, nodeSelector map[string]string, tolerations []corev1.Toleration, affinity *k8s.Affinity, users []string, version *string, resources *k8s.Resources, cmName string, certPath string, secretName string, secretPath string, consoleCMName string, secretVarsName string, secretPasswordsName string) *appsv1.Deployment {

View File

@ -32,7 +32,11 @@ func TestDeployment_Adapt(t *testing.T) {
secretName := "testSecret"
consoleCMName := "testConsoleCM"
cmName := "testCM"
users := []string{"test"}
usersMap := map[string]string{"test": "test"}
users := []string{}
for _, user := range usersMap {
users = append(users, user)
}
annotations := map[string]string{"testHash": "test"}
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
@ -115,8 +119,8 @@ func TestDeployment_Adapt(t *testing.T) {
}
k8sClient.EXPECT().ApplyDeployment(deploymentDef, false).Times(1)
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string {
return map[string]string{"testHash": "test"}
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error) {
return map[string]string{"testHash": "test"}, nil
}
migrationDone := func(k8sClient kubernetes.ClientInt) error {
return nil
@ -128,7 +132,7 @@ func TestDeployment_Adapt(t *testing.T) {
return nil
}
query, _, err := AdaptFunc(
getQuery, _, err := AdaptFunc(
monitor,
mocklabels.Name,
mocklabels.ClosedNameSelector,
@ -144,17 +148,16 @@ func TestDeployment_Adapt(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordsName,
users,
nodeSelector,
nil,
resources,
migrationDone,
configurationDone,
setupDone,
getConfigurationHashes,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
query := getQuery(usersMap, getConfigurationHashes)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(k8sClient))

View File

@ -1,6 +1,7 @@
package deployment
import (
"sort"
"strings"
"github.com/caos/orbos/pkg/kubernetes/k8s"
@ -63,6 +64,7 @@ func GetContainer(
}},
}
sort.Strings(users)
for _, user := range users {
envVars = append(envVars, corev1.EnvVar{
Name: "CR_" + strings.ToUpper(user) + "_PASSWORD",
@ -92,7 +94,7 @@ func GetContainer(
},
Name: containerName,
Image: zitadelImage + ":" + version,
ImagePullPolicy: "IfNotPresent",
ImagePullPolicy: corev1.PullIfNotPresent,
Ports: []corev1.ContainerPort{
{Name: "grpc", ContainerPort: 50001},
{Name: "http", ContainerPort: 50002},
@ -127,5 +129,7 @@ func GetContainer(
PeriodSeconds: 5,
FailureThreshold: 2,
},
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
}

View File

@ -95,7 +95,7 @@ func TestDeployment_GetContainer(t *testing.T) {
},
Name: containerName,
Image: zitadelImage + ":" + version,
ImagePullPolicy: "IfNotPresent",
ImagePullPolicy: corev1.PullIfNotPresent,
Ports: []corev1.ContainerPort{
{Name: "grpc", ContainerPort: 50001},
{Name: "http", ContainerPort: 50002},
@ -130,6 +130,8 @@ func TestDeployment_GetContainer(t *testing.T) {
PeriodSeconds: 5,
FailureThreshold: 2,
},
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
container := GetContainer(

View File

@ -2,6 +2,7 @@ package deployment
import (
corev1 "k8s.io/api/core/v1"
"sort"
"strconv"
"strings"
)
@ -24,6 +25,8 @@ func GetInitContainer(
}
copySecrets := append([]string{}, "cp "+certMountPath+"/client_root/ca.crt "+certTempMountPath+"/ca.crt")
sort.Strings(users)
for _, user := range users {
userReplaced := strings.ReplaceAll(user, "_", "-")
internalName := "client-" + userReplaced
@ -43,10 +46,13 @@ func GetInitContainer(
)
return corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: corev1.PullIfNotPresent,
}
}

View File

@ -25,11 +25,14 @@ func TestDeployment_GetInitContainer(t *testing.T) {
}
equals := corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
ImagePullPolicy: corev1.PullIfNotPresent,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
init := GetInitContainer(rootSecret, dbSecrets, users, RunAsUser)
@ -55,11 +58,14 @@ func TestDeployment_GetInitContainer1(t *testing.T) {
}
equals := corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: corev1.PullIfNotPresent,
}
init := GetInitContainer(rootSecret, dbSecrets, users, RunAsUser)
@ -88,11 +94,14 @@ func TestDeployment_GetInitContainer2(t *testing.T) {
}
equals := corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
ImagePullPolicy: corev1.PullIfNotPresent,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
init := GetInitContainer(rootSecret, dbSecrets, users, RunAsUser)

View File

@ -1,6 +1,8 @@
package deployment
import (
"time"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/labels"
@ -11,7 +13,7 @@ import (
func GetReadyFunc(monitor mntr.Monitor, namespace string, name *labels.Name) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for deployment to be ready")
if err := k8sClient.WaitUntilDeploymentReady(namespace, name.Name(), true, true, 60); err != nil {
if err := k8sClient.WaitUntilDeploymentReady(namespace, name.Name(), true, true, 60*time.Second); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for deployment to be ready"))
return err
}

View File

@ -3,6 +3,7 @@ package deployment
import (
"github.com/caos/zitadel/operator/helpers"
corev1 "k8s.io/api/core/v1"
"sort"
"strings"
)
@ -16,7 +17,8 @@ func GetVolumes(
Name: secretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
SecretName: secretName,
DefaultMode: helpers.PointerInt32(420),
},
},
}, {
@ -31,7 +33,8 @@ func GetVolumes(
Name: secretPasswordsName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretPasswordsName,
SecretName: secretPasswordsName,
DefaultMode: helpers.PointerInt32(384),
},
},
}, {
@ -39,6 +42,7 @@ func GetVolumes(
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: consoleCMName},
DefaultMode: helpers.PointerInt32(420),
},
},
}, {
@ -56,6 +60,7 @@ func userVolumes(
) []corev1.Volume {
volumes := make([]corev1.Volume, 0)
sort.Strings(users)
for _, user := range users {
userReplaced := strings.ReplaceAll(user, "_", "-")
internalName := "client-" + userReplaced

View File

@ -17,7 +17,8 @@ func TestDeployment_Volumes(t *testing.T) {
Name: secretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
SecretName: secretName,
DefaultMode: helpers.PointerInt32(420),
},
},
}, {
@ -32,7 +33,8 @@ func TestDeployment_Volumes(t *testing.T) {
Name: secretPasswordsName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretPasswordsName,
SecretName: secretPasswordsName,
DefaultMode: helpers.PointerInt32(384),
},
},
}, {
@ -40,6 +42,7 @@ func TestDeployment_Volumes(t *testing.T) {
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: consoleCMName},
DefaultMode: helpers.PointerInt32(420),
},
},
}, {

View File

@ -4,14 +4,13 @@ import (
"crypto/sha512"
"encoding/base64"
"encoding/json"
"github.com/pkg/errors"
"github.com/rakyll/statik/fs"
"os"
"path/filepath"
"regexp"
"sort"
"github.com/pkg/errors"
"github.com/rakyll/statik/fs"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/configmap"
@ -102,10 +101,9 @@ func AdaptFunc(
},
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
SecurityContext: &corev1.PodSecurityContext{},
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
Containers: []corev1.Container{
getMigrationContainer(dbHost, dbPort, migrationUser, secretPasswordName, users),
},

View File

@ -97,10 +97,9 @@ func TestMigration_AdaptFunc(t *testing.T) {
},
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
SecurityContext: &corev1.PodSecurityContext{},
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
Containers: []corev1.Container{
getMigrationContainer(dbHost, dbPort, migrationUser, secretPasswordName, users),
},

View File

@ -2,48 +2,104 @@ package zitadel
import (
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/configuration"
)
func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
secrets := map[string]*secret.Secret{}
func getSecretsMap(desiredKind *DesiredV0) (
map[string]*secret.Secret,
map[string]*secret.Existing,
) {
if desiredKind.Spec != nil && desiredKind.Spec.Configuration != nil {
conf := desiredKind.Spec.Configuration
if conf.Tracing != nil {
if conf.Tracing.ServiceAccountJSON == nil {
conf.Tracing.ServiceAccountJSON = &secret.Secret{}
}
secrets["tracingserviceaccountjson"] = conf.Tracing.ServiceAccountJSON
}
var (
secrets = map[string]*secret.Secret{}
existing = map[string]*secret.Existing{}
)
if conf.Secrets != nil {
if conf.Secrets.Keys == nil {
conf.Secrets.Keys = &secret.Secret{}
}
secrets["keys"] = conf.Secrets.Keys
}
if conf.Notifications != nil {
if conf.Notifications.GoogleChatURL == nil {
conf.Notifications.GoogleChatURL = &secret.Secret{}
}
secrets["googlechaturl"] = conf.Notifications.GoogleChatURL
if conf.Notifications.Twilio.SID == nil {
conf.Notifications.Twilio.SID = &secret.Secret{}
}
secrets["twiliosid"] = conf.Notifications.Twilio.SID
if conf.Notifications.Twilio.AuthToken == nil {
conf.Notifications.Twilio.AuthToken = &secret.Secret{}
}
secrets["twilioauthtoken"] = conf.Notifications.Twilio.AuthToken
if conf.Notifications.Email.AppKey == nil {
conf.Notifications.Email.AppKey = &secret.Secret{}
}
secrets["emailappkey"] = conf.Notifications.Email.AppKey
}
if desiredKind.Spec == nil {
desiredKind.Spec = &Spec{}
}
return secrets
if desiredKind.Spec.Configuration == nil {
desiredKind.Spec.Configuration = &configuration.Configuration{}
}
conf := desiredKind.Spec.Configuration
if conf.Tracing == nil {
conf.Tracing = &configuration.Tracing{}
}
if conf.Tracing.ServiceAccountJSON == nil {
conf.Tracing.ServiceAccountJSON = &secret.Secret{}
}
if conf.Tracing.ExistingServiceAccountJSON == nil {
conf.Tracing.ExistingServiceAccountJSON = &secret.Existing{}
}
sakey := "tracingserviceaccountjson"
secrets[sakey] = conf.Tracing.ServiceAccountJSON
existing[sakey] = conf.Tracing.ExistingServiceAccountJSON
if conf.Secrets == nil {
conf.Secrets = &configuration.Secrets{}
}
if conf.Secrets.Keys == nil {
conf.Secrets.Keys = &secret.Secret{}
}
if conf.Secrets.ExistingKeys == nil {
conf.Secrets.ExistingKeys = &secret.Existing{}
}
keysKey := "keys"
secrets[keysKey] = conf.Secrets.Keys
existing[keysKey] = conf.Secrets.ExistingKeys
if conf.Notifications == nil {
conf.Notifications = &configuration.Notifications{}
}
if conf.Notifications.GoogleChatURL == nil {
conf.Notifications.GoogleChatURL = &secret.Secret{}
}
if conf.Notifications.ExistingGoogleChatURL == nil {
conf.Notifications.ExistingGoogleChatURL = &secret.Existing{}
}
gchatkey := "googlechaturl"
secrets[gchatkey] = conf.Notifications.GoogleChatURL
existing[gchatkey] = conf.Notifications.ExistingGoogleChatURL
if conf.Notifications.Twilio == nil {
conf.Notifications.Twilio = &configuration.Twilio{}
}
if conf.Notifications.Twilio.SID == nil {
conf.Notifications.Twilio.SID = &secret.Secret{}
}
if conf.Notifications.Twilio.ExistingSID == nil {
conf.Notifications.Twilio.ExistingSID = &secret.Existing{}
}
twilKey := "twiliosid"
secrets[twilKey] = conf.Notifications.Twilio.SID
existing[twilKey] = conf.Notifications.Twilio.ExistingSID
if conf.Notifications.Twilio.AuthToken == nil {
conf.Notifications.Twilio.AuthToken = &secret.Secret{}
}
if conf.Notifications.Twilio.ExistingAuthToken == nil {
conf.Notifications.Twilio.ExistingAuthToken = &secret.Existing{}
}
twilOAuthKey := "twilioauthtoken"
secrets[twilOAuthKey] = conf.Notifications.Twilio.AuthToken
existing[twilOAuthKey] = conf.Notifications.Twilio.ExistingAuthToken
if conf.Notifications.Email == nil {
conf.Notifications.Email = &configuration.Email{}
}
if conf.Notifications.Email.AppKey == nil {
conf.Notifications.Email.AppKey = &secret.Secret{}
}
if conf.Notifications.Email.ExistingAppKey == nil {
conf.Notifications.Email.ExistingAppKey = &secret.Existing{}
}
mailKey := "emailappkey"
secrets[mailKey] = conf.Notifications.Email.AppKey
existing[mailKey] = conf.Notifications.Email.ExistingAppKey
return secrets, existing
}

View File

@ -20,13 +20,13 @@ func GetExpectedService(
zitadelPodSelector *labels.Selector,
grpcPortName string,
grpcServiceName *labels.Name,
grpcPort uint16,
grpcPort int,
httpPortName string,
httpServiceName *labels.Name,
httpPort uint16,
httpPort int,
uiPortName string,
uiServiceName *labels.Name,
uiPort uint16,
uiPort int,
) []*corev1.Service {
grpcPorts := []corev1.ServicePort{{
@ -123,11 +123,13 @@ func TestServices_AdaptEnsure1(t *testing.T) {
namespace := "test"
grpcPortName := "grpc"
grpcServiceName := "grpc"
grpcPort := 1
httpPortName := "http"
httpServiceName := "http"
httpPort := 2
uiPortName := "ui"
uiServiceName := "ui"
var grpcPort, httpPort, uiPort uint16 = 1, 2, 3
uiPort := 3
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@ -153,11 +155,11 @@ func TestServices_AdaptEnsure1(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort,
uint16(uiPort),
)
assert.NilError(t, err)
@ -172,11 +174,13 @@ func TestServices_AdaptEnsure2(t *testing.T) {
namespace := "test0"
grpcPortName := "grpc"
grpcServiceName := "grpc1"
grpcPort := 11
httpPortName := "http"
httpServiceName := "http2"
httpPort := 22
uiPortName := "ui"
uiServiceName := "ui3"
var grpcPort, httpPort, uiPort uint16 = 11, 22, 33
uiPort := 33
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@ -203,11 +207,11 @@ func TestServices_AdaptEnsure2(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
ensure, err := query(client, nil)
@ -221,11 +225,13 @@ func TestServices_AdaptEnsure3(t *testing.T) {
namespace := "test00"
grpcPortName := "grpc"
grpcServiceName := "grpc11"
grpcPort := 111
httpPortName := "http"
httpServiceName := "http22"
httpPort := 222
uiPortName := "ui"
uiServiceName := "ui33"
var grpcPort, httpPort, uiPort uint16 = 111, 222, 333
uiPort := 333
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@ -252,11 +258,11 @@ func TestServices_AdaptEnsure3(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
ensure, err := query(client, nil)
@ -270,11 +276,13 @@ func TestServices_AdaptDestroy1(t *testing.T) {
namespace := "test"
grpcPortName := "grpc"
grpcServiceName := "grpc"
grpcPort := 1
httpPortName := "http"
httpServiceName := "http"
httpPort := 2
uiPortName := "ui"
uiServiceName := "ui"
var grpcPort, httpPort, uiPort uint16 = 1, 2, 3
uiPort := 3
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@ -301,11 +309,11 @@ func TestServices_AdaptDestroy1(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
assert.NilError(t, destroy(client))
@ -317,11 +325,13 @@ func TestServices_AdaptDestroy2(t *testing.T) {
namespace := "test0"
grpcPortName := "grpc"
grpcServiceName := "grpc1"
grpcPort := 11
httpPortName := "http"
httpServiceName := "http2"
httpPort := 22
uiPortName := "ui"
uiServiceName := "ui3"
var grpcPort, httpPort, uiPort uint16 = 11, 22, 33
uiPort := 33
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@ -348,11 +358,11 @@ func TestServices_AdaptDestroy2(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
assert.NilError(t, destroy(client))
@ -364,11 +374,13 @@ func TestServices_AdaptDestroy3(t *testing.T) {
namespace := "test00"
grpcPortName := "grpc"
grpcServiceName := "grpc11"
grpcPort := 111
httpPortName := "http"
httpServiceName := "http22"
httpPort := 222
uiPortName := "ui"
uiServiceName := "ui33"
var grpcPort, httpPort, uiPort uint16 = 111, 222, 333
uiPort := 333
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@ -395,11 +407,11 @@ func TestServices_AdaptDestroy3(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
assert.NilError(t, destroy(client))

View File

@ -10,10 +10,10 @@ import (
func GetClientIDFunc(
namespace string,
httpServiceName string,
httpPort uint16,
httpPort int,
) func() string {
return func() string {
resp, err := http.Get("http://" + httpServiceName + "." + namespace + ":" + strconv.Itoa(int(httpPort)) + "/clientID")
resp, err := http.Get("http://" + httpServiceName + "." + namespace + ":" + strconv.Itoa(httpPort) + "/clientID")
if err != nil || resp.StatusCode >= 400 {
return ""
}

View File

@ -1,8 +1,6 @@
package setup
import (
"time"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/k8s"
@ -21,7 +19,6 @@ const (
containerName = "zitadel"
rootSecret = "client-root"
dbSecrets = "db-secrets"
timeout = 300 * time.Second
)
func AdaptFunc(
@ -40,12 +37,11 @@ func AdaptFunc(
consoleCMName string,
secretVarsName string,
secretPasswordsName string,
users []string,
migrationDone operator.EnsureFunc,
configurationDone operator.EnsureFunc,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string,
) (
operator.QueryFunc,
func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc,
operator.DestroyFunc,
error,
) {
@ -63,49 +59,60 @@ func AdaptFunc(
operator.ResourceDestroyToZitadelDestroy(destroyJ),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
jobDef := jobDef(
nameLabels,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
namespace,
componentLabels,
nodeselector,
tolerations,
)
hashes := getConfigurationHashes(k8sClient, queried)
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
jobDef.Annotations[k] = v
jobDef.Spec.Template.Annotations[k] = v
return func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
users := make([]string, 0)
for user := range necessaryUsers {
users = append(users, user)
}
}
query, err := job.AdaptFuncToEnsure(jobDef)
if err != nil {
return nil, err
}
jobDef := jobDef(
nameLabels,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
namespace,
componentLabels,
nodeselector,
tolerations,
)
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(migrationDone),
operator.EnsureFuncToQueryFunc(configurationDone),
operator.ResourceQueryToZitadelQuery(query),
}
hashes, err := getConfigurationHashes(k8sClient, queried, necessaryUsers)
if err != nil {
return nil, err
}
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
jobDef.Annotations[k] = v
jobDef.Spec.Template.Annotations[k] = v
}
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
query, err := job.AdaptFuncToEnsure(jobDef)
if err != nil {
return nil, err
}
queriers := []operator.QueryFunc{
operator.ResourceQueryToZitadelQuery(query),
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
nil
}
func jobDef(name *labels.Name, users []string, version *string, resources *k8s.Resources, cmName string, certPath string, secretName string, secretPath string, consoleCMName string, secretVarsName string, secretPasswordsName string, namespace string, componentLabels *labels.Component, nodeselector map[string]string, tolerations []corev1.Toleration) *batchv1.Job {
@ -152,11 +159,10 @@ func jobDef(name *labels.Name, users []string, version *string, resources *k8s.R
Annotations: map[string]string{},
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
SecurityContext: &corev1.PodSecurityContext{},
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
RestartPolicy: "Never",
DNSPolicy: "ClusterFirst",

View File

@ -27,7 +27,11 @@ func TestSetup_AdaptFunc(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
namespace := "test"
reason := "test"
users := []string{"test"}
usersMap := map[string]string{"test": "test"}
users := []string{}
for _, user := range usersMap {
users = append(users, user)
}
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{}
dbHost := "test"
@ -91,11 +95,10 @@ func TestSetup_AdaptFunc(t *testing.T) {
Annotations: annotations,
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
SecurityContext: &corev1.PodSecurityContext{},
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
RestartPolicy: "Never",
DNSPolicy: "ClusterFirst",
@ -110,17 +113,11 @@ func TestSetup_AdaptFunc(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1)
client.EXPECT().GetJob(namespace, getJobName(reason)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobNamePrefix+reason))
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string {
return map[string]string{"testHash": "test"}
}
migrationDone := func(k8sClient kubernetes.ClientInt) error {
return nil
}
configurationDone := func(k8sClient kubernetes.ClientInt) error {
return nil
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error) {
return map[string]string{"testHash": "test"}, nil
}
query, _, err := AdaptFunc(
getQuery, _, err := AdaptFunc(
monitor,
componentLabels,
namespace,
@ -136,10 +133,6 @@ func TestSetup_AdaptFunc(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordsName,
users,
migrationDone,
configurationDone,
getConfigurationHashes,
)
queried := map[string]interface{}{}
@ -149,6 +142,7 @@ func TestSetup_AdaptFunc(t *testing.T) {
})
assert.NoError(t, err)
query := getQuery(usersMap, getConfigurationHashes)
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))

View File

@ -5,6 +5,11 @@ import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator"
"github.com/pkg/errors"
"time"
)
const (
timeout = 20 * time.Minute
)
func GetDoneFunc(

View File

@ -3,88 +3,109 @@ package zitadel
import (
"sort"
"github.com/caos/orbos/pkg/helper"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/configuration"
)
const migrationUser = "flyway"
const (
migrationUser = "flyway"
mgmtUser = "management"
adminUser = "adminapi"
authUser = "auth"
authzUser = "authz"
notUser = "notification"
esUser = "eventstore"
queriesUser = "queries"
)
func getAllUsers(desired *DesiredV0) map[string]string {
func getUserListWithoutPasswords(desired *DesiredV0) []string {
userpw, _ := getAllUsers(nil, desired)
users := make([]string, 0)
for user := range userpw {
users = append(users, user)
}
sort.Slice(users, func(i, j int) bool {
return users[i] < users[j]
})
return users
}
func getAllUsers(k8sClient kubernetes.ClientInt, desired *DesiredV0) (map[string]string, error) {
passwords := &configuration.Passwords{}
if desired != nil && desired.Spec != nil && desired.Spec.Configuration != nil && desired.Spec.Configuration.Passwords != nil {
passwords = desired.Spec.Configuration.Passwords
}
users := make(map[string]string, 0)
migrationPassword := migrationUser
if passwords.Migration != nil {
migrationPassword = passwords.Migration.Value
if err := fillInUserPassword(k8sClient, migrationUser, passwords.Migration, passwords.ExistingMigration, users); err != nil {
return nil, err
}
users[migrationUser] = migrationPassword
mgmtUser := "management"
mgmtPassword := mgmtUser
if passwords != nil && passwords.Management != nil {
mgmtPassword = passwords.Management.Value
if err := fillInUserPassword(k8sClient, mgmtUser, passwords.Management, passwords.ExistingManagement, users); err != nil {
return nil, err
}
users[mgmtUser] = mgmtPassword
adminUser := "adminapi"
adminPassword := adminUser
if passwords != nil && passwords.Adminapi != nil {
adminPassword = passwords.Adminapi.Value
if err := fillInUserPassword(k8sClient, adminUser, passwords.Adminapi, passwords.ExistingAdminapi, users); err != nil {
return nil, err
}
users[adminUser] = adminPassword
authUser := "auth"
authPassword := authUser
if passwords != nil && passwords.Auth != nil {
authPassword = passwords.Auth.Value
if err := fillInUserPassword(k8sClient, authUser, passwords.Auth, passwords.ExistingAuth, users); err != nil {
return nil, err
}
users[authUser] = authPassword
authzUser := "authz"
authzPassword := authzUser
if passwords != nil && passwords.Authz != nil {
authzPassword = passwords.Authz.Value
if err := fillInUserPassword(k8sClient, authzUser, passwords.Authz, passwords.ExistingAuthz, users); err != nil {
return nil, err
}
users[authzUser] = authzPassword
notUser := "notification"
notPassword := notUser
if passwords != nil && passwords.Notification != nil {
notPassword = passwords.Notification.Value
if err := fillInUserPassword(k8sClient, notUser, passwords.Notification, passwords.ExistingNotification, users); err != nil {
return nil, err
}
users[notUser] = notPassword
esUser := "eventstore"
esPassword := esUser
if passwords != nil && passwords.Eventstore != nil {
esPassword = passwords.Eventstore.Value
if err := fillInUserPassword(k8sClient, esUser, passwords.Eventstore, passwords.ExistingEventstore, users); err != nil {
return nil, err
}
users[esUser] = esPassword
queryUser := "queries"
queryPassword := queryUser
if passwords != nil && passwords.Queries != nil {
queryPassword = passwords.Queries.Value
if err := fillInUserPassword(k8sClient, queriesUser, passwords.Queries, passwords.ExistingQueries, users); err != nil {
return nil, err
}
users[queryUser] = queryPassword
return users
return users, nil
}
func getZitadelUserList() []string {
allUsersMap := getAllUsers(nil)
func fillInUserPassword(
k8sClient kubernetes.ClientInt,
user string,
secret *secret.Secret,
existing *secret.Existing,
userpw map[string]string,
) error {
if k8sClient == nil {
userpw[user] = user
return nil
}
allZitadelUsers := make([]string, 0)
for k := range allUsersMap {
pw, err := helper.GetSecretValue(k8sClient, secret, existing)
if err != nil {
return err
}
if pw != "" {
userpw[user] = pw
} else {
userpw[user] = user
}
return nil
}
func getZitadelUserList(k8sClient kubernetes.ClientInt, desired *DesiredV0) (map[string]string, error) {
allUsersMap, err := getAllUsers(k8sClient, desired)
if err != nil {
return nil, err
}
allZitadelUsers := make(map[string]string, 0)
for k, v := range allUsersMap {
if k != migrationUser {
allZitadelUsers = append(allZitadelUsers, k)
allZitadelUsers[k] = v
}
}
sort.Slice(allZitadelUsers, func(i, j int) bool {
return allZitadelUsers[i] < allZitadelUsers[j]
})
return allZitadelUsers
return allZitadelUsers, nil
}

View File

@ -3,18 +3,25 @@ package orb
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/namespace"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/zitadel/kinds/iam"
zitadeldb "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
"github.com/pkg/errors"
)
const (
namespaceName = "caos-zitadel"
)
func AdaptFunc(
orbconfig *orb.Orb,
action string,
binaryVersion *string,
gitops bool,
features []string,
) operator.AdaptFunc {
return func(
@ -25,6 +32,8 @@ func AdaptFunc(
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
allSecrets map[string]*secret.Secret,
allExisting map[string]*secret.Existing,
migrate bool,
err error,
) {
defer func() {
@ -32,12 +41,13 @@ func AdaptFunc(
}()
allSecrets = make(map[string]*secret.Secret)
allExisting = make(map[string]*secret.Existing)
orbMonitor := monitor.WithField("kind", "orb")
desiredKind, err := parseDesiredV0(desiredTree)
desiredKind, err := ParseDesiredV0(desiredTree)
if err != nil {
return nil, nil, allSecrets, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desiredTree.Parsed = desiredKind
currentTree = &tree.Tree{}
@ -46,35 +56,64 @@ func AdaptFunc(
orbMonitor = orbMonitor.Verbose()
}
var dbClient zitadeldb.Client
if gitops {
dbClientT, err := zitadeldb.NewGitOpsClient(monitor, orbconfig.URL, orbconfig.Repokey)
if err != nil {
monitor.Error(err)
return nil, nil, nil, nil, false, err
}
dbClient = dbClientT
} else {
dbClient = zitadeldb.NewCrdClient(monitor)
}
operatorLabels := mustZITADELOperator(binaryVersion)
queryNS, err := namespace.AdaptFuncToEnsure(namespaceName)
if err != nil {
return nil, nil, nil, nil, false, err
}
/*destroyNS, err := namespace.AdaptFuncToDestroy(namespaceName)
if err != nil {
return nil, nil, allSecrets, err
}*/
iamCurrent := &tree.Tree{}
queryIAM, destroyIAM, zitadelSecrets, err := iam.GetQueryAndDestroyFuncs(
queryIAM, destroyIAM, zitadelSecrets, zitadelExisting, migrateIAM, err := iam.GetQueryAndDestroyFuncs(
orbMonitor,
operatorLabels,
desiredKind.IAM,
iamCurrent,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
orbconfig,
dbClient,
namespaceName,
action,
&desiredKind.Spec.Version,
features,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
secret.AppendSecrets("", allSecrets, zitadelSecrets)
migrate = migrate || migrateIAM
secret.AppendSecrets("", allSecrets, zitadelSecrets, allExisting, zitadelExisting)
destroyers := make([]operator.DestroyFunc, 0)
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "iam", "migration", "scaleup", "scaledown":
queriers = append(queriers, queryIAM)
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
queryIAM,
)
destroyers = append(destroyers, destroyIAM)
case "operator":
queriers = append(queriers, operator.EnsureFuncToQueryFunc(Reconcile(monitor, desiredTree, false)))
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
operator.EnsureFuncToQueryFunc(Reconcile(monitor, desiredKind.Spec)),
)
}
}
@ -96,6 +135,8 @@ func AdaptFunc(
return operator.DestroyersToDestroyFunc(monitor, destroyers)(k8sClient)
},
allSecrets,
allExisting,
migrate,
nil
}
}

View File

@ -8,20 +8,24 @@ import (
type DesiredV0 struct {
Common *tree.Common `yaml:",inline"`
Spec struct {
Verbose bool
NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `yaml:"tolerations,omitempty"`
Version string `yaml:"version,omitempty"`
SelfReconciling bool `yaml:"selfReconciling"`
//Use this registry to pull the database operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
IAM *tree.Tree
Spec *Spec `json:"spec" yaml:"spec"`
IAM *tree.Tree `json:"iam" yaml:"iam"`
}
func parseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
// +kubebuilder:object:generate=true
type Spec struct {
Verbose bool `json:"verbose" yaml:"verbose"`
NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
SelfReconciling bool `json:"selfReconciling" yaml:"selfReconciling"`
GitOps bool `json:"gitops,omitempty" yaml:"gitops,omitempty"`
//Use this registry to pull the zitadel operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
desiredKind := &DesiredV0{Common: desiredTree.Common}
if err := desiredTree.Original.Decode(desiredKind); err != nil {

View File

@ -10,37 +10,36 @@ import (
"github.com/pkg/errors"
)
func Reconcile(monitor mntr.Monitor, desiredTree *tree.Tree, takeoff bool) operator.EnsureFunc {
func Reconcile(
monitor mntr.Monitor,
spec *Spec,
) operator.EnsureFunc {
return func(k8sClient kubernetes2.ClientInt) (err error) {
defer func() {
err = errors.Wrapf(err, "building %s failed", desiredTree.Common.Kind)
}()
recMonitor := monitor.WithField("version", spec.Version)
desiredKind, err := parseDesiredV0(desiredTree)
if err != nil {
return errors.Wrap(err, "parsing desired state failed")
}
desiredTree.Parsed = desiredKind
recMonitor := monitor.WithField("version", desiredKind.Spec.Version)
if desiredKind.Spec.Version == "" {
err := errors.New("No version set in zitadel.yml")
if spec.Version == "" {
err := errors.New("No version provided for self-reconciling")
recMonitor.Error(err)
return err
}
imageRegistry := desiredKind.Spec.CustomImageRegistry
imageRegistry := spec.CustomImageRegistry
if imageRegistry == "" {
imageRegistry = "ghcr.io"
}
if takeoff || desiredKind.Spec.SelfReconciling {
if err := kubernetes.EnsureZitadelOperatorArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustZITADELOperator(&desiredKind.Spec.Version)), k8sClient, desiredKind.Spec.Version, desiredKind.Spec.NodeSelector, desiredKind.Spec.Tolerations, imageRegistry); err != nil {
if spec.SelfReconciling {
desiredTree := &tree.Tree{
Common: &tree.Common{
Kind: "zitadel.caos.ch/Orb",
Version: "v0",
},
}
if err := kubernetes.EnsureZitadelOperatorArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustZITADELOperator(&spec.Version)), k8sClient, spec.Version, spec.NodeSelector, spec.Tolerations, imageRegistry, spec.GitOps); err != nil {
recMonitor.Error(errors.Wrap(err, "Failed to deploy zitadel-operator into k8s-cluster"))
return err
}
recMonitor.Info("Applied zitadel-operator")
}
return nil

View File

@ -0,0 +1,54 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package orb
import (
v1 "k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}

View File

@ -2,6 +2,7 @@ package zitadel
import (
"errors"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
@ -10,7 +11,12 @@ import (
"github.com/caos/orbos/pkg/tree"
)
func Takeoff(monitor mntr.Monitor, gitClient *git.Client, adapt operator.AdaptFunc, k8sClient *kubernetes.Client) func() error {
func Takeoff(
monitor mntr.Monitor,
gitClient *git.Client,
adapt operator.AdaptFunc,
k8sClient *kubernetes.Client,
) func() error {
return func() error {
internalMonitor := monitor.WithField("operator", "zitadel")
internalMonitor.Info("Takeoff")
@ -26,7 +32,7 @@ func Takeoff(monitor mntr.Monitor, gitClient *git.Client, adapt operator.AdaptFu
return err
}
query, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
query, _, _, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
if err != nil {
internalMonitor.Error(err)
return err

View File

@ -22,7 +22,7 @@ func InstantBackup(
}
current := &tree.Tree{}
query, _, _, err := orbdb.AdaptFunc(name, nil, "instantbackup")(monitor, desired, current)
query, _, _, _, _, err := orbdb.AdaptFunc(name, nil, false, "instantbackup")(monitor, desired, current)
if err != nil {
monitor.Error(err)
return err

View File

@ -23,7 +23,7 @@ func Clear(
}
current := &tree.Tree{}
query, _, _, err := orbdb.AdaptFunc("", nil, "clean")(monitor, desired, current)
query, _, _, _, _, err := orbdb.AdaptFunc("", nil, false, "clean")(monitor, desired, current)
if err != nil {
monitor.Error(err)
return err

View File

@ -6,11 +6,24 @@ import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api"
"github.com/caos/zitadel/operator/api/database"
coredb "github.com/caos/zitadel/operator/database/kinds/databases/core"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
)
func GetConnectionInfo(
func CrdGetConnectionInfo(
monitor mntr.Monitor,
k8sClient kubernetes.ClientInt,
) (string, string, error) {
desired, err := database.ReadCrd(k8sClient)
if err != nil {
return "", "", err
}
return getConnectionInfo(monitor, k8sClient, desired)
}
func GitOpsGetConnectionInfo(
monitor mntr.Monitor,
k8sClient kubernetes.ClientInt,
gitClient *git.Client,
@ -20,9 +33,18 @@ func GetConnectionInfo(
monitor.Error(err)
return "", "", err
}
return getConnectionInfo(monitor, k8sClient, desired)
}
func getConnectionInfo(
monitor mntr.Monitor,
k8sClient kubernetes.ClientInt,
desired *tree.Tree,
) (string, string, error) {
current := &tree.Tree{}
query, _, _, err := orbdb.AdaptFunc("", nil)(monitor, desired, current)
query, _, _, _, _, err := orbdb.AdaptFunc("", nil, false, "database")(monitor, desired, current)
if err != nil {
return "", "", err
}

Some files were not shown because too many files have changed in this diff Show More