Mirror of https://github.com/zitadel/zitadel.git (synced 2025-08-20 09:07:40 +00:00)
fix: operator picks (#1463)

* feat(crd): add crd mode for operators (#1329)
* feat(operator): add base for zitadel operator
* fix(operator): changed pipeline to release operator
* fix(operator): fmt with only one parameter
* fix(operator): corrected workflow job name
* fix(zitadelctl): added restore and backuplist command
* fix(zitadelctl): scale for restore
* chore(container): use scratch for deploy container
* fix(zitadelctl): limit image to scratch
* fix(migration): added migration scripts for newer version
* fix(operator): changed handling of kubeconfig in operator logic
* fix(operator): changed handling of secrets in operator logic
* fix(operator): use new version of zitadel
* fix(operator): added path for migrations
* fix(operator): delete doublets of migration scripts
* fix(operator): delete subpaths and integrate logic into init container
* fix(operator): corrected path in dockerfile for local migrations
* fix(operator): added migrations for cockroachdb-secure
* fix(operator): delete logic for ambassador module
* fix(operator): added read and write secret commands
* fix(operator): correct and align operator pipeline with zitadel pipeline
* fix(operator): correct yaml error in operator pipeline
* fix(operator): correct action name in operator pipeline
* fix(operator): correct case-sensitive filename in operator pipeline
* fix(operator): upload artifacts from buildx output
* fix(operator): corrected attribute spelling error
* fix(operator): combined jobs for operator binary and image
* fix(operator): added missing comma in operator pipeline
* fix(operator): added codecov for operator image
* fix(operator): added codecov for operator image
* fix(testing): code changes for testing and several unit-tests (#1009)
* fix(operator): usage of interface of kubernetes client for testing and several unit-tests
* fix(operator): several unit-tests
* fix(operator): several unit-tests
* fix(operator): changed order for the operator logic
* fix(operator): added version of zitadelctl from semantic release
* fix(operator): corrected function call with version of zitadelctl
* fix(operator): corrected function call with version of zitadelctl
* fix(operator): add check output to operator release pipeline
* fix(operator): set --short length everywhere to 12
* fix(operator): zitadel setup in job instead of exec with several unit tests
* fix(operator): fixes to combine newest zitadel and testing branch
* fix(operator): corrected path in Dockerfile
* fix(operator): fixed unit-test that was ignored during changes
* fix(operator): fixed unit-test that was ignored during changes
* fix(operator): corrected Dockerfile to correctly use env variable
* fix(operator): quickfix takeoff deployment
* fix(operator): corrected the clusterrolename in the applied artifacts
* fix: update secure migrations
* fix(operator): migrations (#1057)
* fix(operator): copied migrations from orbos repository
* fix(operator): newest migrations
* chore: use cockroach-secure
* fix: rename migration
* fix: remove insecure cockroach migrations

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: finalize labels
* fix(operator): cli logging concurrent and fix deployment of operator during restore
* fix: finalize labels and cli commands
* fix: restore
* chore: cockroachdb is always secure
* chore: use orbos consistent-labels latest commit
* test: make tests compatible with new labels
* fix: default to sa token for start command
* fix: use cockroachdb v12.02
* fix: don't delete flyway user
* test: fix migration test
* fix: use correct table qualifiers
* fix: don't alter sequence ownership
* fix: upgrade flyway
* fix: change ownership of all dbs and tables to admin user
* fix: change defaultdb user
* fix: treat clientid status codes >= 400 as errors
* fix: reconcile specified ZITADEL version, not binary version
* fix: add ca-certs
* fix: use latest orbos code
* fix: use orbos with fixed race condition
* fix: use latest ORBOS code
* fix: use latest ORBOS code
* fix: make migration and scaling around restoring work
* fix(operator): move zitadel operator
* chore(migrations): include owner change migration
* feat(db): add code base for database operator
* fix(db): change used image registry for database operator
* fix(db): generated mock
* fix(db): add accidentally ignored file
* fix(db): add cockroachdb backup image to pipeline
* fix(db): correct pipeline and image versions
* fix(db): correct version of used orbos
* fix(db): correct database import
* fix(db): go mod tidy
* fix(db): use new version for orbos
* fix(migrations): include migrations into zitadelctl binary (#1211)
* fix(db): use statik to integrate migrations into binary
* fix(migrations): corrections unit tests and pipeline for integrated migrations into zitadelctl binary
* fix(migrations): correction in dockerfile for pipeline build
* fix(migrations): correction in dockerfile for pipeline build
* fix(migrations): dockerfile changes for cache optimization
* fix(database): correct used part-of label in database operator
* fix(database): correct used selectable label in zitadel operator
* fix(operator): correct labels for user secrets in zitadel operator
* fix(operator): correct labels for service test in zitadel operator
* fix: don't enable database features for user operations (#1227)
* fix: don't enable database features for user operations
* fix: omit database feature for connection info adapter
* fix: use latest orbos version
* fix(crd): corrected logic to get database connection and other info
* fix(crd): corrected yaml tags and start for zitadel operator
* fix(crd): move some dependencies and use consistent structure
* fix(crd): corrected unit-tests
* fix(crd): corrected main files for debug starts
* chore(pipeline): use correct version for zitadelctl build
* fix(crd): correct calculating of current db state for zitadel operator
* fix(crd): use binary version for deployment of crd mode operators
* fix(crd): add gitops attribute for reconciling
* fix(crd): corrected crd with newest version
* fix(migration): collect cleanup functions and only use them if all jobs are successful
* fix(zitadelctl): import gcp auth to connect to gke cluster
* feat: Add read and writesecret options for crd mode (#1435)
* fix: don't require orbconfig for crd mode
* test: pass
* fix(zitadelctl): import gcp auth to connect to gke cluster
* feat: add read and writesecret option for crd mode
* test: fix
* fix: make all crd secrets writable
* fix: use in-cluster configs for in-cluster operators
* chore: remove unnecessary debug files

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: Crdoperatormerge review (#1385)
* fix: don't require orbconfig for crd mode
* test: pass
* fix(zitadelctl): import gcp auth to connect to gke cluster
* fix: ensure caos-system namespace
* fix: apply orbconfig at takeoff
* docs: improve help for creating an orbconfig
* docs: describe orbconfig properties
* docs: add --gitops to help message example
* fix(pipeline): correct upload of artifacts in dev releases
* test: pass

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix(test): corrected falsely merged tests
* chore: update orbos library
* fix: only handle exactly named and namespaced crd resource
* fix: print errors, check correct crd namespace
* fix: validate bucket secret
* chore: compile
* fix(operator): corrected secret handling when unused secrets are not defined
* fix(operator): corrected handling of jobs
* fix: don't print logs when readsecret path is provided
* fix(operator): corrected handling of jobs and sort for mounted volumes
* fix(operator): sort for volumes
* fix(operator): change orbos import to newest release

Co-authored-by: Florian Forster <florian@caos.ch>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>

(cherry picked from commit fa9bd5a8e7)

* fix(operator): Standard timeout handling (#1458)
* fix: always use standard time.Duration
* fix: give backup and restore more time
* fix: give backup and restore jobs more time

(cherry picked from commit 7468b7d1e8)

* fix go mod

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
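The "Standard timeout handling" item (#1458) fixes a classic Go pitfall that the hunks below repeat in several packages: a constant declared as `timeout time.Duration = 60` is 60 nanoseconds, because time.Duration counts nanoseconds; only multiplying by a unit such as time.Second or time.Minute yields the intended span. A minimal, self-contained sketch (not part of the commit) demonstrating the difference:

```go
package main

import (
	"fmt"
	"time"
)

const (
	badTimeout  time.Duration = 60               // 60 nanoseconds, not 60 seconds
	goodTimeout               = 15 * time.Minute // multiply by a unit to get what you mean
)

func main() {
	fmt.Println(badTimeout)  // prints "60ns"
	fmt.Println(goodTimeout) // prints "15m0s"
}
```

This is why the diff replaces `timeout time.Duration = 60` with values like `15 * time.Minute` and `60 * time.Second`, and replaces raw `60`/`1` arguments with `60*time.Second`/`1*time.Second`.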
@@ -28,6 +28,8 @@ func GetQueryAndDestroyFuncs(
     operator.QueryFunc,
     operator.DestroyFunc,
     map[string]*secret.Secret,
+    map[string]*secret.Existing,
+    bool,
     error,
 ) {
     switch desiredTree.Common.Kind {
@@ -50,7 +52,7 @@ func GetQueryAndDestroyFuncs(
             features,
         )(monitor, desiredTree, currentTree)
     default:
-        return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
+        return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
     }
 }
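The hunk above already shows the pattern that repeats through the rest of the diff: every adapt/query-and-destroy function widens its return list from four values to six, adding a map of references to existing Kubernetes secrets and a migrate flag. A compile-checkable sketch of the shape with stubbed types (the real ones are orbos' mntr.Monitor, tree.Tree and secret.Secret/secret.Existing, and zitadel's operator function types); only the shape is the point:

```go
package sketch

// Stubs standing in for the orbos/zitadel types used in the diff.
type (
	Monitor     struct{}
	Tree        struct{}
	Secret      struct{ Value string }
	Existing    struct{ Name string }
	QueryFunc   func() error
	DestroyFunc func() error
)

// AdaptFunc mirrors the widened signature introduced by this commit.
type AdaptFunc func(monitor Monitor, desired, current *Tree) (
	QueryFunc,            // queries the cluster and yields an ensure step
	DestroyFunc,          // tears the managed resources down
	map[string]*Secret,   // secrets managed inline in the desired state
	map[string]*Existing, // references to secrets that already exist in the cluster
	bool,                 // migrate: the stored desired state must be rewritten
	error,
)
```

Because the tuple grew, every early `return nil, nil, nil, err` in the old code becomes `return nil, nil, nil, nil, false, err`, which is most of the mechanical churn below.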
@@ -2,6 +2,7 @@ package bucket

 import (
     "github.com/caos/orbos/mntr"
+    "github.com/caos/orbos/pkg/helper"
     "github.com/caos/orbos/pkg/kubernetes"
     "github.com/caos/orbos/pkg/kubernetes/resources/secret"
     "github.com/caos/orbos/pkg/labels"
@@ -32,28 +33,36 @@ func AdaptFunc(
     version string,
     features []string,
 ) operator.AdaptFunc {
-    return func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (queryFunc operator.QueryFunc, destroyFunc operator.DestroyFunc, secrets map[string]*secretpkg.Secret, err error) {
+    return func(
+        monitor mntr.Monitor,
+        desired *tree.Tree,
+        current *tree.Tree,
+    ) (
+        operator.QueryFunc,
+        operator.DestroyFunc,
+        map[string]*secretpkg.Secret,
+        map[string]*secretpkg.Existing,
+        bool,
+        error,
+    ) {

         internalMonitor := monitor.WithField("component", "backup")

         desiredKind, err := ParseDesiredV0(desired)
         if err != nil {
-            return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
+            return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
         }
         desired.Parsed = desiredKind

+        secrets, existing := getSecretsMap(desiredKind)
+
         if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
             internalMonitor.Verbose()
         }

         destroyS, err := secret.AdaptFuncToDestroy(namespace, secretName)
         if err != nil {
-            return nil, nil, nil, err
-        }
-
-        queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: desiredKind.Spec.ServiceAccountJSON.Value})
-        if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, nil, nil, false, err
         }

         _, destroyB, err := backup.AdaptFunc(
@@ -74,7 +83,7 @@ func AdaptFunc(
             version,
         )
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, nil, nil, false, err
         }

         _, destroyR, err := restore.AdaptFunc(
@@ -93,7 +102,7 @@ func AdaptFunc(
             version,
         )
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, nil, nil, false, err
         }

         _, destroyC, err := clean.AdaptFunc(
@@ -110,7 +119,7 @@ func AdaptFunc(
             version,
         )
         if err != nil {
-            return nil, nil, nil, err
+            return nil, nil, nil, nil, false, err
         }

         destroyers := make([]operator.DestroyFunc, 0)
@@ -133,6 +142,11 @@ func AdaptFunc(
         }

         return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
+
+            if err := desiredKind.validateSecrets(); err != nil {
+                return nil, err
+            }
+
             currentDB, err := coreDB.ParseQueriedForDatabase(queried)
             if err != nil {
                 return nil, err
@@ -143,6 +157,16 @@ func AdaptFunc(
                 databases = []string{}
             }

+            value, err := helper.GetSecretValue(k8sClient, desiredKind.Spec.ServiceAccountJSON, desiredKind.Spec.ExistingServiceAccountJSON)
+            if err != nil {
+                return nil, err
+            }
+
+            queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: value})
+            if err != nil {
+                return nil, err
+            }
+
             queryB, _, err := backup.AdaptFunc(
                 internalMonitor,
                 name,
@@ -201,30 +225,53 @@ func AdaptFunc(
             }

             queriers := make([]operator.QueryFunc, 0)
+            cleanupQueries := make([]operator.QueryFunc, 0)
             if databases != nil && len(databases) != 0 {
                 for _, feature := range features {
                     switch feature {
-                    case backup.Normal, backup.Instant:
+                    case backup.Normal:
                         queriers = append(queriers,
                             operator.ResourceQueryToZitadelQuery(queryS),
                             queryB,
                         )
+                    case backup.Instant:
+                        queriers = append(queriers,
+                            operator.ResourceQueryToZitadelQuery(queryS),
+                            queryB,
+                        )
+                        cleanupQueries = append(cleanupQueries,
+                            operator.EnsureFuncToQueryFunc(backup.GetCleanupFunc(monitor, namespace, name)),
+                        )
                     case clean.Instant:
                         queriers = append(queriers,
                             operator.ResourceQueryToZitadelQuery(queryS),
                             queryC,
                         )
+                        cleanupQueries = append(cleanupQueries,
+                            operator.EnsureFuncToQueryFunc(clean.GetCleanupFunc(monitor, namespace, name)),
+                        )
                     case restore.Instant:
                         queriers = append(queriers,
                             operator.ResourceQueryToZitadelQuery(queryS),
                             queryR,
                         )
+                        cleanupQueries = append(cleanupQueries,
+                            operator.EnsureFuncToQueryFunc(restore.GetCleanupFunc(monitor, namespace, name)),
+                        )
                     }
                 }
             }
+
+            for _, cleanup := range cleanupQueries {
+                queriers = append(queriers, cleanup)
+            }
+
             return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
         },
         operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
-        getSecretsMap(desiredKind),
+        secrets,
+        existing,
+        false,
         nil
     }
 }
@@ -1,6 +1,8 @@
 package bucket

 import (
+    "testing"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -13,7 +15,6 @@ import (
     "github.com/golang/mock/gomock"
     "github.com/stretchr/testify/assert"
     corev1 "k8s.io/api/core/v1"
-    "testing"
 )

 func TestBucket_Secrets(t *testing.T) {
@@ -60,7 +61,7 @@ func TestBucket_Secrets(t *testing.T) {
         "serviceaccountjson": saJson,
     }

-    _, _, secrets, err := AdaptFunc(
+    _, _, secrets, existing, _, err := AdaptFunc(
         backupName,
         namespace,
         componentLabels,
@@ -78,6 +79,7 @@ func TestBucket_Secrets(t *testing.T) {
     assert.NoError(t, err)
     for key, value := range allSecrets {
         assert.Contains(t, secrets, key)
+        assert.Contains(t, existing, key)
         assert.Equal(t, value, secrets[key].Value)
     }
 }
@@ -131,7 +133,7 @@ func TestBucket_AdaptBackup(t *testing.T) {

     SetBackup(client, namespace, k8sLabels, saJson)

-    query, _, _, err := AdaptFunc(
+    query, _, _, _, _, err := AdaptFunc(
         backupName,
         namespace,
         componentLabels,
@@ -205,7 +207,7 @@ func TestBucket_AdaptInstantBackup(t *testing.T) {

     SetInstantBackup(client, namespace, backupName, k8sLabels, saJson)

-    query, _, _, err := AdaptFunc(
+    query, _, _, _, _, err := AdaptFunc(
         backupName,
         namespace,
         componentLabels,
@@ -280,7 +282,7 @@ func TestBucket_AdaptRestore(t *testing.T) {

     SetRestore(client, namespace, backupName, k8sLabels, saJson)

-    query, _, _, err := AdaptFunc(
+    query, _, _, _, _, err := AdaptFunc(
         backupName,
         namespace,
         componentLabels,
@@ -316,6 +318,15 @@ func TestBucket_AdaptClean(t *testing.T) {
     namespace := "testNs"

     componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
+    k8sLabels := map[string]string{
+        "app.kubernetes.io/component":  "testComponent",
+        "app.kubernetes.io/managed-by": "testOp",
+        "app.kubernetes.io/name":       "backup-serviceaccountjson",
+        "app.kubernetes.io/part-of":    "testProd",
+        "app.kubernetes.io/version":    "testVersion",
+        "caos.ch/apiversion":           "v0",
+        "caos.ch/kind":                 "BucketBackup",
+    }

     timestamp := "test"
     nodeselector := map[string]string{"test": "test"}
@@ -344,9 +355,9 @@ func TestBucket_AdaptClean(t *testing.T) {
         return nil
     }

-    SetClean(client, namespace, backupName)
+    SetClean(client, namespace, backupName, k8sLabels, saJson)

-    query, _, _, err := AdaptFunc(
+    query, _, _, _, _, err := AdaptFunc(
         backupName,
         namespace,
         componentLabels,
@@ -1,9 +1,10 @@
 package backup

 import (
-    "github.com/caos/zitadel/operator"
     "time"

+    "github.com/caos/zitadel/operator"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     "github.com/caos/orbos/pkg/kubernetes/resources/cronjob"
@@ -13,18 +14,18 @@ import (
 )

 const (
-    defaultMode        int32 = 256
-    certPath           = "/cockroach/cockroach-certs"
-    secretPath         = "/secrets/sa.json"
-    backupPath         = "/cockroach"
-    backupNameEnv      = "BACKUP_NAME"
-    cronJobNamePrefix  = "backup-"
-    internalSecretName = "client-certs"
-    image              = "ghcr.io/caos/zitadel-crbackup"
-    rootSecretName     = "cockroachdb.client.root"
-    timeout            time.Duration = 60
-    Normal             = "backup"
-    Instant            = "instantbackup"
+    defaultMode        int32 = 256
+    certPath                 = "/cockroach/cockroach-certs"
+    secretPath               = "/secrets/sa.json"
+    backupPath               = "/cockroach"
+    backupNameEnv            = "BACKUP_NAME"
+    cronJobNamePrefix        = "backup-"
+    internalSecretName       = "client-certs"
+    image                    = "ghcr.io/caos/zitadel-crbackup"
+    rootSecretName           = "cockroachdb.client.root"
+    timeout                  = 15 * time.Minute
+    Normal                   = "backup"
+    Instant                  = "instantbackup"
 )

 func AdaptFunc(
@@ -119,7 +120,6 @@ func AdaptFunc(
         queriers = append(queriers,
             operator.EnsureFuncToQueryFunc(checkDBReady),
             operator.ResourceQueryToZitadelQuery(queryJ),
-            operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobDef.Namespace, jobDef.Name)),
         )
     }
 }
@@ -1,6 +1,8 @@
 package backup

 import (
+    "testing"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -10,7 +12,6 @@ import (
     corev1 "k8s.io/api/core/v1"
     macherrs "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    "testing"
 )

 func TestBackup_AdaptInstantBackup1(t *testing.T) {
@@ -60,8 +61,6 @@ func TestBackup_AdaptInstantBackup1(t *testing.T) {

     client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
     client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
-    client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)

     query, _, err := AdaptFunc(
         monitor,
@@ -134,8 +133,6 @@ func TestBackup_AdaptInstantBackup2(t *testing.T) {

     client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
     client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
-    client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)

     query, _, err := AdaptFunc(
         monitor,
@@ -7,15 +7,19 @@ import (
     "github.com/pkg/errors"
 )

-func getCleanupFunc(monitor mntr.Monitor, namespace string, name string) operator.EnsureFunc {
+func GetCleanupFunc(
+    monitor mntr.Monitor,
+    namespace string,
+    backupName string,
+) operator.EnsureFunc {
     return func(k8sClient kubernetes.ClientInt) error {
         monitor.Info("waiting for backup to be completed")
-        if err := k8sClient.WaitUntilJobCompleted(namespace, name, timeout); err != nil {
+        if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
             monitor.Error(errors.Wrap(err, "error while waiting for backup to be completed"))
             return err
         }
         monitor.Info("backup is completed, cleanup")
-        if err := k8sClient.DeleteJob(namespace, name); err != nil {
+        if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
             monitor.Error(errors.Wrap(err, "error while trying to cleanup backup"))
             return err
         }
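The cleanup helper is now exported and takes the backup name rather than a ready-made job name; the concrete job name is derived through the package's GetJobName. A sketch of the same shape with stubbed types (the real client is orbos' kubernetes.ClientInt, and the "backup-" prefix here is an assumption for illustration; each package defines its own prefix/suffix constants):

```go
package sketch

import "time"

// jobClient stands in for orbos' kubernetes.ClientInt, reduced to the
// two calls the cleanup func makes.
type jobClient interface {
	WaitUntilJobCompleted(namespace, name string, timeout time.Duration) error
	DeleteJob(namespace, name string) error
}

// getJobName is a hypothetical stand-in for the packages' GetJobName.
func getJobName(backupName string) string { return "backup-" + backupName }

// getCleanupFunc mirrors the exported GetCleanupFunc: wait until the job
// derived from the backup name completes, then delete it so the same
// backup can run again later.
func getCleanupFunc(namespace, backupName string, timeout time.Duration) func(jobClient) error {
	return func(c jobClient) error {
		name := getJobName(backupName)
		if err := c.WaitUntilJobCompleted(namespace, name, timeout); err != nil {
			return err
		}
		return c.DeleteJob(namespace, name)
	}
}
```

Exporting the func lets the bucket adapter schedule cleanup once, after all feature queriers ran, instead of each job deleting itself inline, which matches the "collect cleanup functions and only use them if all jobs are successful" item in the commit message.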
@@ -1,12 +1,13 @@
 package backup

 import (
+    "testing"
+
     "github.com/caos/orbos/mntr"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
     "github.com/golang/mock/gomock"
     "github.com/pkg/errors"
     "github.com/stretchr/testify/assert"
-    "testing"
 )

 func TestBackup_Cleanup1(t *testing.T) {
@@ -15,12 +16,12 @@ func TestBackup_Cleanup1(t *testing.T) {
     name := "test"
     namespace := "testNs"

-    cleanupFunc := getCleanupFunc(monitor, namespace, name)
-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(namespace, name).Times(1)
+    cleanupFunc := GetCleanupFunc(monitor, namespace, name)
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
+    client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
     assert.NoError(t, cleanupFunc(client))

-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
     assert.Error(t, cleanupFunc(client))
 }

@@ -30,11 +31,11 @@ func TestBackup_Cleanup2(t *testing.T) {
     name := "test2"
     namespace := "testNs2"

-    cleanupFunc := getCleanupFunc(monitor, namespace, name)
-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(namespace, name).Times(1)
+    cleanupFunc := GetCleanupFunc(monitor, namespace, name)
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
+    client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
     assert.NoError(t, cleanupFunc(client))

-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
     assert.Error(t, cleanupFunc(client))
 }
@@ -1,9 +1,10 @@
 package clean

 import (
-    "github.com/caos/zitadel/operator"
     "time"

+    "github.com/caos/zitadel/operator"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     "github.com/caos/orbos/pkg/kubernetes/resources/job"
@@ -12,16 +13,16 @@ import (
 )

 const (
-    Instant            = "clean"
-    defaultMode        = int32(256)
-    certPath           = "/cockroach/cockroach-certs"
-    secretPath         = "/secrets/sa.json"
-    internalSecretName = "client-certs"
-    image              = "ghcr.io/caos/zitadel-crbackup"
-    rootSecretName     = "cockroachdb.client.root"
-    jobPrefix          = "backup-"
-    jobSuffix          = "-clean"
-    timeout            time.Duration = 60
+    Instant            = "clean"
+    defaultMode        = int32(256)
+    certPath           = "/cockroach/cockroach-certs"
+    secretPath         = "/secrets/sa.json"
+    internalSecretName = "client-certs"
+    image              = "ghcr.io/caos/zitadel-crbackup"
+    rootSecretName     = "cockroachdb.client.root"
+    jobPrefix          = "backup-"
+    jobSuffix          = "-clean"
+    timeout            = 60 * time.Second
 )

 func AdaptFunc(
@@ -71,7 +72,6 @@ func AdaptFunc(
     queriers := []operator.QueryFunc{
         operator.EnsureFuncToQueryFunc(checkDBReady),
         operator.ResourceQueryToZitadelQuery(queryJ),
-        operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobDef.Namespace, jobDef.Name)),
     }

     return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
@@ -1,6 +1,8 @@
 package clean

 import (
+    "testing"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -10,7 +12,6 @@ import (
     corev1 "k8s.io/api/core/v1"
     macherrs "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    "testing"
 )

 func TestBackup_Adapt1(t *testing.T) {
@@ -49,8 +50,6 @@ func TestBackup_Adapt1(t *testing.T) {

     client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
     client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
-    client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)

     query, _, err := AdaptFunc(
         monitor,
@@ -109,8 +108,6 @@ func TestBackup_Adapt2(t *testing.T) {

     client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
     client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
-    client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)

     query, _, err := AdaptFunc(
         monitor,
@@ -7,19 +7,19 @@ import (
     "github.com/pkg/errors"
 )

-func getCleanupFunc(
+func GetCleanupFunc(
     monitor mntr.Monitor,
     namespace string,
-    jobName string,
+    backupName string,
 ) operator.EnsureFunc {
     return func(k8sClient kubernetes.ClientInt) error {
         monitor.Info("waiting for clean to be completed")
-        if err := k8sClient.WaitUntilJobCompleted(namespace, jobName, 60); err != nil {
+        if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
             monitor.Error(errors.Wrap(err, "error while waiting for clean to be completed"))
             return err
         }
         monitor.Info("clean is completed, cleanup")
-        if err := k8sClient.DeleteJob(namespace, jobName); err != nil {
+        if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
             monitor.Error(errors.Wrap(err, "error while trying to cleanup clean"))
             return err
         }
@@ -15,12 +15,12 @@ func TestBackup_Cleanup1(t *testing.T) {
     name := "test"
     namespace := "testNs"

-    cleanupFunc := getCleanupFunc(monitor, namespace, name)
-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(namespace, name).Times(1)
+    cleanupFunc := GetCleanupFunc(monitor, namespace, name)
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
+    client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
     assert.NoError(t, cleanupFunc(client))

-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
     assert.Error(t, cleanupFunc(client))
 }

@@ -30,11 +30,11 @@ func TestBackup_Cleanup2(t *testing.T) {
     name := "test2"
     namespace := "testNs2"

-    cleanupFunc := getCleanupFunc(monitor, namespace, name)
-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(namespace, name).Times(1)
+    cleanupFunc := GetCleanupFunc(monitor, namespace, name)
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
+    client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
     assert.NoError(t, cleanupFunc(client))

-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
     assert.Error(t, cleanupFunc(client))
 }
@@ -1,7 +1,9 @@
 package bucket

 import (
-    secret2 "github.com/caos/orbos/pkg/secret"
+    "fmt"
+
+    "github.com/caos/orbos/pkg/secret"
     "github.com/caos/orbos/pkg/tree"
     "github.com/pkg/errors"
 )
@@ -12,14 +14,15 @@ type DesiredV0 struct {
 }

 type Spec struct {
-    Verbose            bool
-    Cron               string          `yaml:"cron,omitempty"`
-    Bucket             string          `yaml:"bucket,omitempty"`
-    ServiceAccountJSON *secret2.Secret `yaml:"serviceAccountJSON,omitempty"`
+    Verbose                    bool
+    Cron                       string           `yaml:"cron,omitempty"`
+    Bucket                     string           `yaml:"bucket,omitempty"`
+    ServiceAccountJSON         *secret.Secret   `yaml:"serviceAccountJSON,omitempty"`
+    ExistingServiceAccountJSON *secret.Existing `yaml:"existingServiceAccountJSON,omitempty"`
 }

 func (s *Spec) IsZero() bool {
-    if (s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) &&
+    if ((s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) && (s.ExistingServiceAccountJSON == nil || s.ExistingServiceAccountJSON.IsZero())) &&
         !s.Verbose &&
         s.Cron == "" &&
         s.Bucket == "" {
@@ -40,3 +43,10 @@ func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {

     return desiredKind, nil
 }
+
+func (d *DesiredV0) validateSecrets() error {
+    if err := secret.ValidateSecret(d.Spec.ServiceAccountJSON, d.Spec.ExistingServiceAccountJSON); err != nil {
+        return fmt.Errorf("validating api key failed: %w", err)
+    }
+    return nil
+}
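The new ExistingServiceAccountJSON field means the bucket spec can now either carry the service-account JSON inline or point at a secret that already exists in the cluster, and validateSecrets delegates the either/or check to orbos' secret.ValidateSecret. A self-contained sketch of the rule as I read it from this diff (assumption: at least one of the two sources must be set; the real ValidateSecret may check more):

```go
package sketch

import "fmt"

// Stubs for orbos' secret.Secret and secret.Existing.
type Secret struct{ Value string }
type Existing struct{ Name string }

// validateSecret mirrors the either/or rule validateSecrets enforces.
func validateSecret(s *Secret, e *Existing) error {
	hasInline := s != nil && s.Value != ""
	hasExisting := e != nil && e.Name != ""
	if !hasInline && !hasExisting {
		return fmt.Errorf("neither an inline value nor an existing-secret reference is provided")
	}
	return nil
}
```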
@@ -65,7 +65,18 @@ func SetClean(
     k8sClient *kubernetesmock.MockClientInt,
     namespace string,
     backupName string,
+    labels map[string]string,
+    saJson string,
 ) {
+    k8sClient.EXPECT().ApplySecret(&corev1.Secret{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      secretName,
+            Namespace: namespace,
+            Labels:    labels,
+        },
+        StringData: map[string]string{secretKey: saJson},
+        Type:       "Opaque",
+    }).Times(1).Return(nil)

     k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
     k8sClient.EXPECT().GetJob(namespace, clean.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, clean.GetJobName(backupName)))
@@ -1,9 +1,10 @@
 package restore

 import (
-    "github.com/caos/zitadel/operator"
     "time"

+    "github.com/caos/zitadel/operator"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     "github.com/caos/orbos/pkg/kubernetes/resources/job"
@@ -12,16 +13,16 @@ import (
 )

 const (
-    Instant            = "restore"
-    defaultMode        = int32(256)
-    certPath           = "/cockroach/cockroach-certs"
-    secretPath         = "/secrets/sa.json"
-    jobPrefix          = "backup-"
-    jobSuffix          = "-restore"
-    image              = "ghcr.io/caos/zitadel-crbackup"
-    internalSecretName = "client-certs"
-    rootSecretName     = "cockroachdb.client.root"
-    timeout            time.Duration = 60
+    Instant            = "restore"
+    defaultMode        = int32(256)
+    certPath           = "/cockroach/cockroach-certs"
+    secretPath         = "/secrets/sa.json"
+    jobPrefix          = "backup-"
+    jobSuffix          = "-restore"
+    image              = "ghcr.io/caos/zitadel-crbackup"
+    internalSecretName = "client-certs"
+    rootSecretName     = "cockroachdb.client.root"
+    timeout            = 15 * time.Minute
 )

 func AdaptFunc(
@@ -79,7 +80,6 @@ func AdaptFunc(
     queriers := []operator.QueryFunc{
         operator.EnsureFuncToQueryFunc(checkDBReady),
         operator.ResourceQueryToZitadelQuery(queryJ),
-        operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobdef.Namespace, jobdef.Name)),
     }

     return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
@@ -1,6 +1,8 @@
 package restore

 import (
+    "testing"
+
     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -10,7 +12,6 @@ import (
     corev1 "k8s.io/api/core/v1"
     macherrs "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    "testing"
 )

 func TestBackup_Adapt1(t *testing.T) {
@@ -54,8 +55,6 @@ func TestBackup_Adapt1(t *testing.T) {

     client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
     client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
-    client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)

     query, _, err := AdaptFunc(
         monitor,
@@ -121,8 +120,6 @@ func TestBackup_Adapt2(t *testing.T) {

     client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
     client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
-    client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)

     query, _, err := AdaptFunc(
         monitor,
@@ -7,15 +7,19 @@ import (
     "github.com/pkg/errors"
 )

-func getCleanupFunc(monitor mntr.Monitor, namespace, jobName string) operator.EnsureFunc {
+func GetCleanupFunc(
+    monitor mntr.Monitor,
+    namespace,
+    backupName string,
+) operator.EnsureFunc {
     return func(k8sClient kubernetes.ClientInt) error {
         monitor.Info("waiting for restore to be completed")
-        if err := k8sClient.WaitUntilJobCompleted(namespace, jobName, timeout); err != nil {
+        if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
             monitor.Error(errors.Wrap(err, "error while waiting for restore to be completed"))
             return err
         }
         monitor.Info("restore is completed, cleanup")
-        if err := k8sClient.DeleteJob(namespace, jobName); err != nil {
+        if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
             monitor.Error(errors.Wrap(err, "error while trying to cleanup restore"))
             return err
         }
@@ -15,12 +15,12 @@ func TestBackup_Cleanup1(t *testing.T) {
     name := "test"
     namespace := "testNs"

-    cleanupFunc := getCleanupFunc(monitor, namespace, name)
-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(namespace, name).Times(1)
+    cleanupFunc := GetCleanupFunc(monitor, namespace, name)
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
+    client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
     assert.NoError(t, cleanupFunc(client))

-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
     assert.Error(t, cleanupFunc(client))
 }

@@ -30,11 +30,11 @@ func TestBackup_Cleanup2(t *testing.T) {
     name := "test2"
     namespace := "testNs2"

-    cleanupFunc := getCleanupFunc(monitor, namespace, name)
-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
-    client.EXPECT().DeleteJob(namespace, name).Times(1)
+    cleanupFunc := GetCleanupFunc(monitor, namespace, name)
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
+    client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
     assert.NoError(t, cleanupFunc(client))

-    client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
+    client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
     assert.Error(t, cleanupFunc(client))
 }
@@ -4,8 +4,12 @@ import (
     "github.com/caos/orbos/pkg/secret"
 )

-func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
-    secrets := make(map[string]*secret.Secret, 0)
+func getSecretsMap(desiredKind *DesiredV0) (map[string]*secret.Secret, map[string]*secret.Existing) {
+
+    var (
+        secrets  = make(map[string]*secret.Secret, 0)
+        existing = make(map[string]*secret.Existing, 0)
+    )
     if desiredKind.Spec == nil {
         desiredKind.Spec = &Spec{}
     }
@@ -13,7 +17,14 @@ func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
     if desiredKind.Spec.ServiceAccountJSON == nil {
         desiredKind.Spec.ServiceAccountJSON = &secret.Secret{}
     }
-    secrets["serviceaccountjson"] = desiredKind.Spec.ServiceAccountJSON

-    return secrets
+    if desiredKind.Spec.ExistingServiceAccountJSON == nil {
+        desiredKind.Spec.ExistingServiceAccountJSON = &secret.Existing{}
+    }
+
+    sakey := "serviceaccountjson"
+    secrets[sakey] = desiredKind.Spec.ServiceAccountJSON
+    existing[sakey] = desiredKind.Spec.ExistingServiceAccountJSON
+
+    return secrets, existing
 }
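The invariant here is that both maps share the same key, so a caller can resolve either the inline value or the existing-secret reference under "serviceaccountjson". A reduced, self-contained sketch of the reshaped function with stubbed types (the real ones are orbos' secret package and the bucket Spec):

```go
package sketch

// Stubs for orbos' secret types and the bucket spec.
type Secret struct{ Value string }
type Existing struct{ Name string }

type spec struct {
	ServiceAccountJSON         *Secret
	ExistingServiceAccountJSON *Existing
}

// getSecretsMap, as reshaped by the diff: the same key indexes both maps,
// and nil fields are replaced with empty values so writes always have a target.
func getSecretsMap(s *spec) (map[string]*Secret, map[string]*Existing) {
	if s.ServiceAccountJSON == nil {
		s.ServiceAccountJSON = &Secret{}
	}
	if s.ExistingServiceAccountJSON == nil {
		s.ExistingServiceAccountJSON = &Existing{}
	}
	const key = "serviceaccountjson"
	return map[string]*Secret{key: s.ServiceAccountJSON},
		map[string]*Existing{key: s.ExistingServiceAccountJSON}
}
```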
@@ -1,22 +1,26 @@
 package bucket

 import (
+    "testing"
+
     "github.com/caos/orbos/pkg/secret"
     "github.com/stretchr/testify/assert"
-    "testing"
 )

 func TestBucket_getSecretsFull(t *testing.T) {
-    secrets := getSecretsMap(&desired)
+    secrets, existing := getSecretsMap(&desired)
     assert.Equal(t, desired.Spec.ServiceAccountJSON, secrets["serviceaccountjson"])
+    assert.Equal(t, desired.Spec.ExistingServiceAccountJSON, existing["serviceaccountjson"])
 }

 func TestBucket_getSecretsEmpty(t *testing.T) {
-    secrets := getSecretsMap(&desiredWithoutSecret)
+    secrets, existing := getSecretsMap(&desiredWithoutSecret)
     assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
+    assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
 }

 func TestBucket_getSecretsNil(t *testing.T) {
-    secrets := getSecretsMap(&desiredNil)
+    secrets, existing := getSecretsMap(&desiredNil)
     assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
+    assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
 }
@@ -35,6 +35,8 @@ func GetQueryAndDestroyFuncs(
     query operator.QueryFunc,
     destroy operator.DestroyFunc,
     secrets map[string]*secret.Secret,
+    existing map[string]*secret.Existing,
+    migrate bool,
     err error,
 ) {
     componentLabels := labels.MustForComponent(apiLabels, component)
@@ -46,7 +48,7 @@ func GetQueryAndDestroyFuncs(
     case "databases.caos.ch/ProvidedDatabase":
         return provided.AdaptFunc()(internalMonitor, desiredTree, currentTree)
     default:
-        return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
+        return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
     }
 }
@@ -1,10 +1,11 @@
 package managed

 import (
-    "github.com/caos/zitadel/operator"
     "strconv"
     "strings"

+    "github.com/caos/zitadel/operator"
+
     "github.com/caos/orbos/pkg/labels"

     "github.com/caos/orbos/pkg/secret"
@@ -51,6 +52,8 @@ func AdaptFunc(
     operator.QueryFunc,
     operator.DestroyFunc,
     map[string]*secret.Secret,
+    map[string]*secret.Existing,
+    bool,
     error,
 ) {

@@ -62,14 +65,21 @@ func AdaptFunc(
     operator.QueryFunc,
     operator.DestroyFunc,
     map[string]*secret.Secret,
+    map[string]*secret.Existing,
+    bool,
     error,
 ) {
-    internalMonitor := monitor.WithField("kind", "cockroachdb")
-    allSecrets := map[string]*secret.Secret{}

+    var (
+        internalMonitor = monitor.WithField("kind", "cockroachdb")
+        allSecrets      = make(map[string]*secret.Secret)
+        allExisting     = make(map[string]*secret.Existing)
+        migrate         bool
+    )
+
     desiredKind, err := parseDesiredV0(desired)
     if err != nil {
-        return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
+        return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
     }
     desired.Parsed = desiredKind

@@ -92,15 +102,15 @@ func AdaptFunc(

     queryCert, destroyCert, addUser, deleteUser, listUsers, err := certificate.AdaptFunc(internalMonitor, namespace, componentLabels, desiredKind.Spec.ClusterDns, isFeatureDatabase)
     if err != nil {
-        return nil, nil, nil, err
+        return nil, nil, nil, nil, false, err
     }
     addRoot, err := addUser("root")
     if err != nil {
-        return nil, nil, nil, err
+        return nil, nil, nil, nil, false, err
     }
     destroyRoot, err := deleteUser("root")
     if err != nil {
-        return nil, nil, nil, err
+        return nil, nil, nil, nil, false, err
     }

     queryRBAC, destroyRBAC, err := rbac.AdaptFunc(internalMonitor, namespace, labels.MustForName(componentLabels, serviceAccountName))
@@ -126,7 +136,7 @@ func AdaptFunc(
         desiredKind.Spec.Resources,
     )
     if err != nil {
-        return nil, nil, nil, err
+        return nil, nil, nil, nil, false, err
     }

     queryS, destroyS, err := services.AdaptFunc(
@@ -147,12 +157,12 @@ func AdaptFunc(

     queryPDB, err := pdb.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, pdbName), cockroachSelector, "1")
     if err != nil {
-        return nil, nil, nil, err
+        return nil, nil, nil, nil, false, err
     }

     destroyPDB, err := pdb.AdaptFuncToDestroy(namespace, pdbName)
     if err != nil {
-        return nil, nil, nil, err
+        return nil, nil, nil, nil, false, err
     }

     currentDB := &Current{
@@ -203,7 +213,7 @@ func AdaptFunc(
     for backupName, desiredBackup := range desiredKind.Spec.Backups {
         currentBackup := &tree.Tree{}
         if timestamp == "" || !oneBackup || (timestamp != "" && strings.HasPrefix(timestamp, backupName)) {
-            queryB, destroyB, secrets, err := backups.GetQueryAndDestroyFuncs(
+            queryB, destroyB, secrets, existing, migrateB, err := backups.GetQueryAndDestroyFuncs(
                 internalMonitor,
                 desiredBackup,
                 currentBackup,
@@ -218,10 +228,12 @@ func AdaptFunc(
                 features,
             )
             if err != nil {
-                return nil, nil, nil, err
+                return nil, nil, nil, nil, false, err
             }

-            secret.AppendSecrets(backupName, allSecrets, secrets)
+            migrate = migrate || migrateB
+
+            secret.AppendSecrets(backupName, allSecrets, secrets, allExisting, existing)
             destroyers = append(destroyers, destroyB)
             queriers = append(queriers, queryB)
         }
@@ -251,6 +263,8 @@ func AdaptFunc(
         },
         operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
         allSecrets,
+        allExisting,
+        migrate,
         nil
     }
 }
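The aggregation logic in the managed-database adapter is the interesting part of the hunk above: every configured backup contributes its own secrets and migrate verdict, the secrets are merged under a per-backup prefix (that is what orbos' secret.AppendSecrets does, to my reading), and the migrate flags are OR-ed together so that one backup needing migration flags the whole tree. A reduced sketch with stubbed types and a hypothetical dotted-prefix scheme (an assumption; the exact key format is AppendSecrets' business):

```go
package sketch

import "fmt"

type Secret struct{ Value string }
type Existing struct{ Name string }

// adaptBackup stands in for backups.GetQueryAndDestroyFuncs; only the
// values relevant to the aggregation are kept.
func adaptBackup(name string) (map[string]*Secret, map[string]*Existing, bool, error) {
	_ = name
	return map[string]*Secret{"serviceaccountjson": {}},
		map[string]*Existing{"serviceaccountjson": {}}, false, nil
}

// collect mirrors the loop in managed.AdaptFunc: merge each backup's
// secrets under its name and OR the migrate flags.
func collect(backupNames []string) (map[string]*Secret, map[string]*Existing, bool, error) {
	allSecrets := make(map[string]*Secret)
	allExisting := make(map[string]*Existing)
	migrate := false
	for _, name := range backupNames {
		secrets, existing, migrateB, err := adaptBackup(name)
		if err != nil {
			return nil, nil, false, err
		}
		migrate = migrate || migrateB
		for k, v := range secrets {
			allSecrets[fmt.Sprintf("%s.%s", name, k)] = v
		}
		for k, v := range existing {
			allExisting[fmt.Sprintf("%s.%s", name, k)] = v
		}
	}
	return allSecrets, allExisting, migrate, nil
}
```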
@@ -1,6 +1,9 @@
 package managed

 import (
+    "testing"
+    "time"
+
     "github.com/caos/orbos/mntr"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
     "github.com/caos/orbos/pkg/labels"
@@ -13,8 +16,6 @@ import (
     "github.com/golang/mock/gomock"
     "github.com/stretchr/testify/assert"
     corev1 "k8s.io/api/core/v1"
-    "testing"
-    "time"
 )

 func getTreeWithDBAndBackup(t *testing.T, masterkey string, saJson string, backupName string) *tree.Tree {
@@ -81,9 +82,9 @@ func TestManaged_AdaptBucketBackup(t *testing.T) {

     features := []string{backup.Normal}
     bucket.SetBackup(k8sClient, namespace, labels, saJson)
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)

-    query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+    query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
     assert.NoError(t, err)

     databases := []string{"test1", "test2"}
@@ -119,11 +120,11 @@ func TestManaged_AdaptBucketInstantBackup(t *testing.T) {

     features := []string{backup.Instant}
     bucket.SetInstantBackup(k8sClient, namespace, backupName, labels, saJson)
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)

     desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)

-    query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+    query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
     assert.NoError(t, err)

     databases := []string{"test1", "test2"}
@@ -159,12 +160,12 @@ func TestManaged_AdaptBucketCleanAndRestore(t *testing.T) {

     features := []string{restore.Instant, clean.Instant}
     bucket.SetRestore(k8sClient, namespace, backupName, labels, saJson)
-    bucket.SetClean(k8sClient, namespace, backupName)
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).Times(2)
+    bucket.SetClean(k8sClient, namespace, backupName, labels, saJson)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(2)

     desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)

-    query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+    query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
     assert.NoError(t, err)

     databases := []string{"test1", "test2"}
@@ -1,10 +1,11 @@
 package managed

 import (
-    "gopkg.in/yaml.v3"
     "testing"
     "time"

+    "gopkg.in/yaml.v3"
+
     "github.com/caos/orbos/mntr"
     kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
     "github.com/caos/orbos/pkg/labels"
@@ -114,11 +115,11 @@ func TestManaged_Adapt1(t *testing.T) {
     //statefulset
     k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
     //running for setup
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
     //not ready for setup
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
     //ready after setup
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
     //client
     k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
     dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@@ -132,7 +133,7 @@ func TestManaged_Adapt1(t *testing.T) {
     dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
     k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)

-    query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+    query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
     assert.NoError(t, err)

     ensure, err := query(k8sClient, queried)
@@ -226,11 +227,11 @@ func TestManaged_Adapt2(t *testing.T) {
     //statefulset
     k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
     //running for setup
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
     //not ready for setup
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
     //ready after setup
-    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
+    k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
     //client
     k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
     dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@@ -244,7 +245,7 @@ func TestManaged_Adapt2(t *testing.T) {
     dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
     k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)

-    query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+    query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
     assert.NoError(t, err)

     ensure, err := query(k8sClient, queried)
@@ -2,12 +2,14 @@ package statefulset

 import (
     "fmt"
+    "sort"
+    "strings"
+    "time"
+
     "github.com/caos/orbos/pkg/labels"
     "github.com/caos/zitadel/operator"
     "github.com/caos/zitadel/operator/helpers"
     "k8s.io/apimachinery/pkg/util/intstr"
-    "sort"
-    "strings"

     "github.com/caos/orbos/mntr"
     "github.com/caos/orbos/pkg/kubernetes"
@@ -216,7 +218,7 @@ func AdaptFunc(
     wrapedQuery, wrapedDestroy, err := resources.WrapFuncs(internalMonitor, query, destroy)
     checkDBRunning := func(k8sClient kubernetes.ClientInt) error {
         internalMonitor.Info("waiting for statefulset to be running")
-        if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60); err != nil {
+        if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60*time.Second); err != nil {
             internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be running"))
             return err
         }
@@ -226,7 +228,7 @@ func AdaptFunc(

     checkDBNotReady := func(k8sClient kubernetes.ClientInt) error {
         internalMonitor.Info("checking for statefulset to not be ready")
-        if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1); err != nil {
+        if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1*time.Second); err != nil {
             internalMonitor.Info("statefulset is not ready")
             return nil
         }
@@ -253,7 +255,7 @@ func AdaptFunc(

     checkDBReady := func(k8sClient kubernetes.ClientInt) error {
         internalMonitor.Info("waiting for statefulset to be ready")
-        if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60); err != nil {
+        if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60*time.Second); err != nil {
             internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be ready"))
             return err
         }
@@ -17,6 +17,8 @@ func AdaptFunc() func(
     operator.QueryFunc,
     operator.DestroyFunc,
     map[string]*secret.Secret,
+    map[string]*secret.Existing,
+    bool,
     error,
 ) {
     return func(
@@ -27,11 +29,13 @@ func AdaptFunc() func(
         operator.QueryFunc,
         operator.DestroyFunc,
         map[string]*secret.Secret,
+        map[string]*secret.Existing,
+        bool,
         error,
     ) {
         desiredKind, err := parseDesiredV0(desired)
         if err != nil {
-            return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
+            return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
         }
         desired.Parsed = desiredKind

@@ -53,7 +57,9 @@ func AdaptFunc() func(
         }, func(k8sClient kubernetes.ClientInt) error {
             return nil
         },
-        map[string]*secret.Secret{},
+        make(map[string]*secret.Secret),
+        make(map[string]*secret.Existing),
+        false,
         nil
     }
 }
@@ -9,6 +9,9 @@ import (
|
||||
"github.com/caos/orbos/pkg/tree"
|
||||
"github.com/caos/orbos/pkg/treelabels"
|
||||
"github.com/caos/zitadel/operator"
|
||||
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
|
||||
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/clean"
|
||||
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
|
||||
"github.com/caos/zitadel/operator/database/kinds/databases"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
@@ -21,18 +24,29 @@ func OperatorSelector() *labels.Selector {
 	return labels.OpenOperatorSelector("ZITADEL", "database.caos.ch")
 }

-func AdaptFunc(timestamp string, binaryVersion *string, features ...string) operator.AdaptFunc {
-	return func(monitor mntr.Monitor, orbDesiredTree *tree.Tree, currentTree *tree.Tree) (queryFunc operator.QueryFunc, destroyFunc operator.DestroyFunc, secrets map[string]*secret.Secret, err error) {
+func AdaptFunc(timestamp string, binaryVersion *string, gitops bool, features ...string) operator.AdaptFunc {
+	return func(
+		monitor mntr.Monitor,
+		orbDesiredTree *tree.Tree,
+		currentTree *tree.Tree,
+	) (
+		queryFunc operator.QueryFunc,
+		destroyFunc operator.DestroyFunc,
+		secrets map[string]*secret.Secret,
+		existing map[string]*secret.Existing,
+		migrate bool,
+		err error,
+	) {
 		defer func() {
 			err = errors.Wrapf(err, "building %s failed", orbDesiredTree.Common.Kind)
 		}()

 		orbMonitor := monitor.WithField("kind", "orb")

-		desiredKind, err := parseDesiredV0(orbDesiredTree)
+		desiredKind, err := ParseDesiredV0(orbDesiredTree)
 		if err != nil {
-			return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
+			return nil, nil, nil, nil, migrate, errors.Wrap(err, "parsing desired state failed")
 		}
 		orbDesiredTree.Parsed = desiredKind
 		currentTree = &tree.Tree{}
@@ -43,18 +57,18 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) operator.AdaptFunc {

 		queryNS, err := namespace.AdaptFuncToEnsure(NamespaceStr)
 		if err != nil {
-			return nil, nil, nil, err
+			return nil, nil, nil, nil, migrate, err
 		}
-		destroyNS, err := namespace.AdaptFuncToDestroy(NamespaceStr)
+		/*destroyNS, err := namespace.AdaptFuncToDestroy(NamespaceStr)
 		if err != nil {
 			return nil, nil, nil, err
-		}
+		}*/

 		databaseCurrent := &tree.Tree{}

 		operatorLabels := mustDatabaseOperator(binaryVersion)

-		queryDB, destroyDB, secrets, err := databases.GetQueryAndDestroyFuncs(
+		queryDB, destroyDB, secrets, existing, migrate, err := databases.GetQueryAndDestroyFuncs(
 			orbMonitor,
 			desiredKind.Database,
 			databaseCurrent,
@@ -66,23 +80,28 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) operator.AdaptFunc {
 			desiredKind.Spec.Version,
 			features,
 		)

 		if err != nil {
-			return nil, nil, nil, err
-		}
-		queriers := []operator.QueryFunc{
-			operator.ResourceQueryToZitadelQuery(queryNS),
-			queryDB,
-		}
-		if desiredKind.Spec.SelfReconciling {
-			queriers = append(queriers,
-				operator.EnsureFuncToQueryFunc(Reconcile(monitor, orbDesiredTree)),
-			)
+			return nil, nil, nil, nil, migrate, err
 		}

-		destroyers := []operator.DestroyFunc{
-			operator.ResourceDestroyToZitadelDestroy(destroyNS),
-			destroyDB,
+		destroyers := make([]operator.DestroyFunc, 0)
+		queriers := make([]operator.QueryFunc, 0)
+		for _, feature := range features {
+			switch feature {
+			case "database", backup.Instant, backup.Normal, restore.Instant, clean.Instant:
+				queriers = append(queriers,
+					operator.ResourceQueryToZitadelQuery(queryNS),
+					queryDB,
+				)
+				destroyers = append(destroyers,
+					destroyDB,
+				)
+			case "operator":
+				queriers = append(queriers,
+					operator.ResourceQueryToZitadelQuery(queryNS),
+					operator.EnsureFuncToQueryFunc(Reconcile(monitor, desiredKind.Spec)),
+				)
+			}
 		}

 		currentTree.Parsed = &DesiredV0{
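With this switch, which queriers and destroyers get wired up now depends entirely on the features the caller requests; the new gitops flag is simply threaded through. A hypothetical call site (timestamp and version values invented; the feature names come straight from the case labels above):

	binaryVersion := "v0.0.0"
	// regular run: reconcile the operator deployment and the database itself
	adaptAll := AdaptFunc("2021-03-22T10:00:00Z", &binaryVersion, false, "operator", "database")
	// one-off instant backup, e.g. driven from zitadelctl, in gitops mode
	adaptBackup := AdaptFunc("2021-03-22T10:00:00Z", &binaryVersion, true, backup.Instant)
	_, _ = adaptAll, adaptBackup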
@@ -105,6 +124,8 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) operator.AdaptFunc {
 				return operator.DestroyersToDestroyFunc(monitor, destroyers)(k8sClient)
 			},
 			secrets,
+			existing,
+			migrate,
 			nil
 	}
 }
@@ -9,7 +9,7 @@ import (

 func BackupListFunc() func(monitor mntr.Monitor, desiredTree *tree.Tree) (strings []string, err error) {
 	return func(monitor mntr.Monitor, desiredTree *tree.Tree) (strings []string, err error) {
-		desiredKind, err := parseDesiredV0(desiredTree)
+		desiredKind, err := ParseDesiredV0(desiredTree)
 		if err != nil {
 			return nil, errors.Wrap(err, "parsing desired state failed")
 		}
@@ -7,21 +7,25 @@ import (
 )

 type DesiredV0 struct {
-	Common *tree.Common `yaml:",inline"`
-	Spec   struct {
-		Verbose         bool
-		NodeSelector    map[string]string   `yaml:"nodeSelector,omitempty"`
-		Tolerations     []corev1.Toleration `yaml:"tolerations,omitempty"`
-		Version         string              `yaml:"version,omitempty"`
-		SelfReconciling bool                `yaml:"selfReconciling"`
-		//Use this registry to pull the ZITADEL operator image from
-		//@default: ghcr.io
-		CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
-	}
+	Common   *tree.Common `json:",inline" yaml:",inline"`
+	Spec     *Spec        `json:"spec" yaml:"spec"`
 	Database *tree.Tree
 }

-func parseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
+// +kubebuilder:object:generate=true
+type Spec struct {
+	Verbose         bool                `json:"verbose" yaml:"verbose"`
+	NodeSelector    map[string]string   `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
+	Tolerations     []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"`
+	Version         string              `json:"version,omitempty" yaml:"version,omitempty"`
+	SelfReconciling bool                `json:"selfReconciling" yaml:"selfReconciling"`
+	GitOps          bool                `json:"gitOps,omitempty" yaml:"gitOps,omitempty"`
+	//Use this registry to pull the Database operator image from
+	//@default: ghcr.io
+	CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
+}
+
+func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
 	desiredKind := &DesiredV0{Common: desiredTree.Common}

 	if err := desiredTree.Original.Decode(desiredKind); err != nil {
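parseDesiredV0 becomes the exported ParseDesiredV0 because other packages now parse the same desired state (BackupListFunc above already calls it). A caller sketch under that assumption, with the import path inferred from the file layout in this diff:

	import (
		"fmt"

		"github.com/caos/orbos/pkg/tree"
		"github.com/caos/zitadel/operator/database/kinds/orb"
	)

	func printSpec(desiredTree *tree.Tree) error {
		desiredKind, err := orb.ParseDesiredV0(desiredTree)
		if err != nil {
			return err
		}
		// Spec is now a named pointer type rather than an anonymous struct,
		// so it can carry the kubebuilder marker and a generated DeepCopy.
		fmt.Println(desiredKind.Spec.SelfReconciling, desiredKind.Spec.GitOps)
		return nil
	}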
@@ -10,38 +10,38 @@ import (
 	"github.com/pkg/errors"
 )

-func Reconcile(monitor mntr.Monitor, desiredTree *tree.Tree) operator.EnsureFunc {
+func Reconcile(
+	monitor mntr.Monitor,
+	spec *Spec,
+) operator.EnsureFunc {
 	return func(k8sClient kubernetes.ClientInt) (err error) {
-		defer func() {
-			err = errors.Wrapf(err, "building %s failed", desiredTree.Common.Kind)
-		}()
-
-		desiredKind, err := parseDesiredV0(desiredTree)
-		if err != nil {
-			return errors.Wrap(err, "parsing desired state failed")
-		}
-		desiredTree.Parsed = desiredKind
-
-		recMonitor := monitor.WithField("version", desiredKind.Spec.Version)
+		recMonitor := monitor.WithField("version", spec.Version)

-		if desiredKind.Spec.Version == "" {
-			err := errors.New("No version set in database.yml")
-			monitor.Error(err)
+		if spec.Version == "" {
+			err := errors.New("No version provided for self-reconciling")
+			recMonitor.Error(err)
 			return err
 		}

-		imageRegistry := desiredKind.Spec.CustomImageRegistry
+		imageRegistry := spec.CustomImageRegistry
 		if imageRegistry == "" {
 			imageRegistry = "ghcr.io"
 		}

-		if err := zitadelKubernetes.EnsureDatabaseArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustDatabaseOperator(&desiredKind.Spec.Version)), k8sClient, desiredKind.Spec.Version, desiredKind.Spec.NodeSelector, desiredKind.Spec.Tolerations, imageRegistry); err != nil {
-			recMonitor.Error(errors.Wrap(err, "Failed to deploy database-operator into k8s-cluster"))
-			return err
+		if spec.SelfReconciling {
+			desiredTree := &tree.Tree{
+				Common: &tree.Common{
+					Kind:    "databases.caos.ch/Orb",
+					Version: "v0",
+				},
+			}
+
+			if err := zitadelKubernetes.EnsureDatabaseArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustDatabaseOperator(&spec.Version)), k8sClient, spec.Version, spec.NodeSelector, spec.Tolerations, imageRegistry, spec.GitOps); err != nil {
+				recMonitor.Error(errors.Wrap(err, "Failed to deploy database-operator into k8s-cluster"))
+				return err
+			}
+			recMonitor.Info("Applied database-operator")
 		}
-		recMonitor.Info("Applied database-operator")

 		return nil
 	}
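Reconcile no longer parses a tree itself: the caller hands over the already-parsed *Spec, deployment is gated on spec.SelfReconciling, and the synthetic databases.caos.ch/Orb tree exists only to derive API labels. A sketch of the new call-site contract, matching the "operator" case in the orb hunk above (monitor and k8sClient assumed to be in scope):

	ensure := Reconcile(monitor, desiredKind.Spec) // returns an operator.EnsureFunc
	if err := ensure(k8sClient); err != nil {      // k8sClient implements kubernetes.ClientInt
		monitor.Error(err)
	}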
operator/database/kinds/orb/zz_generated.deepcopy.go (new file, 54 lines)
@@ -0,0 +1,54 @@
+// +build !ignore_autogenerated
+
+/*
+
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package orb
+
+import (
+	"k8s.io/api/core/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Spec) DeepCopyInto(out *Spec) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]v1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
+func (in *Spec) DeepCopy() *Spec {
+	if in == nil {
+		return nil
+	}
+	out := new(Spec)
+	in.DeepCopyInto(out)
+	return out
+}
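This generated file is what the +kubebuilder:object:generate=true marker on Spec produces: controller-gen copies the NodeSelector map and the Tolerations slice element by element, so a copy never aliases the original. A small sketch of what that guarantees (usage assumed, not part of this diff):

	orig := &Spec{NodeSelector: map[string]string{"role": "db"}}
	cp := orig.DeepCopy()
	cp.NodeSelector["role"] = "ingress"    // mutate only the copy
	fmt.Println(orig.NodeSelector["role"]) // still "db": the map was deep-copied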
@@ -2,6 +2,7 @@ package database

 import (
+	"errors"

 	"github.com/caos/orbos/mntr"
 	"github.com/caos/orbos/pkg/git"
 	"github.com/caos/orbos/pkg/kubernetes"
@@ -25,7 +26,7 @@ func Takeoff(monitor mntr.Monitor, gitClient *git.Client, adapt operator.AdaptFunc
 		return
 	}

-	query, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
+	query, _, _, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
 	if err != nil {
 		internalMonitor.Error(err)
 		return