fix: operator picks (#1463)
* feat(crd): add crd mode for operators (#1329)
* feat(operator): add base for zitadel operator
* fix(operator): changed pipeline to release operator
* fix(operator): fmt with only one parameter
* fix(operator): corrected workflow job name
* fix(zitadelctl): added restore and backuplist command
* fix(zitadelctl): scale for restore
* chore(container): use scratch for deploy container
* fix(zitadelctl): limit image to scratch
* fix(migration): added migration scripts for newer version
* fix(operator): changed handling of kubeconfig in operator logic
* fix(operator): changed handling of secrets in operator logic
* fix(operator): use new version of zitadel
* fix(operator): added path for migrations
* fix(operator): delete doublets of migration scripts
* fix(operator): delete subpaths and integrate logic into init container
* fix(operator): corrected path in dockerfile for local migrations
* fix(operator): added migrations for cockroachdb-secure
* fix(operator): delete logic for ambassador module
* fix(operator): added read and write secret commands
* fix(operator): correct and align operator pipeline with zitadel pipeline
* fix(operator): correct yaml error in operator pipeline
* fix(operator): correct action name in operator pipeline
* fix(operator): correct case-sensitive filename in operator pipeline
* fix(operator): upload artifacts from buildx output
* fix(operator): corrected attribute spelling error
* fix(operator): combined jobs for operator binary and image
* fix(operator): added missing comma in operator pipeline
* fix(operator): added codecov for operator image
* fix(operator): added codecov for operator image
* fix(testing): code changes for testing and several unit-tests (#1009)
* fix(operator): usage of interface of kubernetes client for testing and several unit-tests
* fix(operator): several unit-tests
* fix(operator): several unit-tests
* fix(operator): changed order for the operator logic
* fix(operator): added version of zitadelctl from semantic release
* fix(operator): corrected function call with version of zitadelctl
* fix(operator): corrected function call with version of zitadelctl
* fix(operator): add check output to operator release pipeline
* fix(operator): set --short length everywhere to 12
* fix(operator): zitadel setup in job instead of exec with several unit tests
* fix(operator): fixes to combine newest zitadel and testing branch
* fix(operator): corrected path in Dockerfile
* fix(operator): fixed unit-test that was ignored during changes
* fix(operator): fixed unit-test that was ignored during changes
* fix(operator): corrected Dockerfile to correctly use env variable
* fix(operator): quickfix takeoff deployment
* fix(operator): corrected the clusterrolename in the applied artifacts
* fix: update secure migrations
* fix(operator): migrations (#1057)
* fix(operator): copied migrations from orbos repository
* fix(operator): newest migrations
* chore: use cockroach-secure
* fix: rename migration
* fix: remove insecure cockroach migrations

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: finalize labels
* fix(operator): concurrent cli logging and fixed deployment of operator during restore
* fix: finalize labels and cli commands
* fix: restore
* chore: cockroachdb is always secure
* chore: use orbos consistent-labels latest commit
* test: make tests compatible with new labels
* fix: default to sa token for start command
* fix: use cockroachdb v12.02
* fix: don't delete flyway user
* test: fix migration test
* fix: use correct table qualifiers
* fix: don't alter sequence ownership
* fix: upgrade flyway
* fix: change ownership of all dbs and tables to admin user
* fix: change defaultdb user
* fix: treat clientid status codes >= 400 as errors
* fix: reconcile specified ZITADEL version, not binary version
* fix: add ca-certs
* fix: use latest orbos code
* fix: use orbos with fixed race condition
* fix: use latest ORBOS code
* fix: use latest ORBOS code
* fix: make migration and scaling around restoring work
* fix(operator): move zitadel operator
* chore(migrations): include owner change migration
* feat(db): add code base for database operator
* fix(db): change used image registry for database operator
* fix(db): generated mock
* fix(db): add accidentally ignored file
* fix(db): add cockroachdb backup image to pipeline
* fix(db): correct pipeline and image versions
* fix(db): correct version of used orbos
* fix(db): correct database import
* fix(db): go mod tidy
* fix(db): use new version for orbos
* fix(migrations): include migrations into zitadelctl binary (#1211)
* fix(db): use statik to integrate migrations into binary
* fix(migrations): corrections unit tests and pipeline for integrated migrations into zitadelctl binary
* fix(migrations): correction in dockerfile for pipeline build
* fix(migrations): correction in dockerfile for pipeline build
* fix(migrations): dockerfile changes for cache optimization
* fix(database): correct used part-of label in database operator
* fix(database): correct used selectable label in zitadel operator
* fix(operator): correct labels for user secrets in zitadel operator
* fix(operator): correct labels for service test in zitadel operator
* fix: don't enable database features for user operations (#1227)
* fix: don't enable database features for user operations
* fix: omit database feature for connection info adapter
* fix: use latest orbos version
* fix(crd): corrected logic to get database connection and other info
* fix(crd): corrected yaml tags and start for zitadel operator
* fix(crd): move some dependencies and use consistent structure
* fix(crd): corrected unit-tests
* fix(crd): corrected main files for debug starts
* chore(pipeline): use correct version for zitadelctl build
* fix(crd): correct calculating of current db state for zitadel operator
* fix(crd): use binary version for deployment of crd mode operators
* fix(crd): add gitops attribute for reconciling
* fix(crd): corrected crd with newest version
* fix(migration): collect cleanup functions and only use them if all jobs are successful
* fix(zitadelctl): import gcp auth to connect to gke cluster
* feat: Add read and writesecret options for crd mode (#1435)
* fix: don't require orbconfig for crd mode
* test: pass
* fix(zitadelctl): import gcp auth to connect to gke cluster
* feat: add read and writesecret option for crd mode
* test: fix
* fix: make all crd secrets writable
* fix: use in-cluster configs for in-cluster operators
* chore: remove unnecessary debug files

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: Crdoperatormerge review (#1385)
* fix: don't require orbconfig for crd mode
* test: pass
* fix(zitadelctl): import gcp auth to connect to gke cluster
* fix: ensure caos-system namespace
* fix: apply orbconfig at takeoff
* docs: improve help for creating an orbconfig
* docs: describe orbconfig properties
* docs: add --gitops to help message example
* fix(pipeline): correct upload of artifacts in dev releases
* test: pass

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix(test): corrected falsely merged tests
* chore: update orbos library
* fix: only handle exactly named and namespaced crd resource
* fix: print errors, check correct crd namespace
* fix: validate bucket secret
* chore: compile
* fix(operator): corrected secret handling when unused secrets are not defined
* fix(operator): corrected handling of jobs
* fix: don't print logs when readsecret path is provided
* fix(operator): corrected handling of jobs and sort for mounted volumes
* fix(operator): sort for volumes
* fix(operator): change orbos import to newest release

Co-authored-by: Florian Forster <florian@caos.ch>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>

(cherry picked from commit fa9bd5a8e7)

* fix(operator): Standard timeout handling (#1458)
* fix: always use standard time.Duration (illustrated in the sketch below)
* fix: give backup and restore more time
* fix: give backup and restore jobs more time

(cherry picked from commit 7468b7d1e8)

* fix go mod

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
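The "Standard timeout handling" pick (#1458) drives many of the hunks below, where bare integer literals passed as durations become explicit time.Duration values. Assuming the client method already took a time.Duration, the old constant silently converted to 60 nanoseconds rather than 60 seconds. A minimal self-contained sketch of the distinction:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.Duration counts nanoseconds, so converting a bare 60 yields
	// 60ns, while multiplying by time.Second yields a real minute.
	fmt.Println(time.Duration(60)) // 60ns
	fmt.Println(60 * time.Second)  // 1m0s
}
```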
@@ -35,6 +35,8 @@ func GetQueryAndDestroyFuncs(
 	query operator.QueryFunc,
 	destroy operator.DestroyFunc,
 	secrets map[string]*secret.Secret,
+	existing map[string]*secret.Existing,
+	migrate bool,
 	err error,
 ) {
 	componentLabels := labels.MustForComponent(apiLabels, component)
@@ -46,7 +48,7 @@ func GetQueryAndDestroyFuncs(
 	case "databases.caos.ch/ProvidedDatabase":
 		return provided.AdaptFunc()(internalMonitor, desiredTree, currentTree)
 	default:
-		return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
+		return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
 	}
 }
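GetQueryAndDestroyFuncs fails closed: any unrecognized kind string hits the errors.Errorf branch above, now with the widened six-value return. A stripped-down sketch of the kind dispatch; only ProvidedDatabase appears in the hunk, so the CockroachDB kind string here is an assumption:

```go
package main

import "fmt"

// adaptByKind dispatches on a kind string the way the switch above does,
// returning an error for anything unrecognized.
func adaptByKind(kind string) (migrate bool, err error) {
	switch kind {
	case "databases.caos.ch/CockroachDB": // assumed managed-database kind
		return true, nil
	case "databases.caos.ch/ProvidedDatabase":
		return false, nil
	default:
		return false, fmt.Errorf("unknown database kind %s", kind)
	}
}

func main() {
	if _, err := adaptByKind("databases.caos.ch/Nonexistent"); err != nil {
		fmt.Println(err) // unknown database kind databases.caos.ch/Nonexistent
	}
}
```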
@@ -1,10 +1,11 @@
 package managed
 
 import (
-	"github.com/caos/zitadel/operator"
 	"strconv"
 	"strings"
 
+	"github.com/caos/zitadel/operator"
+
 	"github.com/caos/orbos/pkg/labels"
 
 	"github.com/caos/orbos/pkg/secret"
@@ -51,6 +52,8 @@ func AdaptFunc(
 	operator.QueryFunc,
 	operator.DestroyFunc,
 	map[string]*secret.Secret,
+	map[string]*secret.Existing,
+	bool,
 	error,
 ) {
 
@@ -62,14 +65,21 @@ func AdaptFunc(
 	operator.QueryFunc,
 	operator.DestroyFunc,
 	map[string]*secret.Secret,
+	map[string]*secret.Existing,
+	bool,
 	error,
 ) {
-	internalMonitor := monitor.WithField("kind", "cockroachdb")
-	allSecrets := map[string]*secret.Secret{}
+
+	var (
+		internalMonitor = monitor.WithField("kind", "cockroachdb")
+		allSecrets      = make(map[string]*secret.Secret)
+		allExisting     = make(map[string]*secret.Existing)
+		migrate         bool
+	)
 
 	desiredKind, err := parseDesiredV0(desired)
 	if err != nil {
-		return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
+		return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
 	}
 	desired.Parsed = desiredKind
 
@@ -92,15 +102,15 @@ func AdaptFunc(
 
 	queryCert, destroyCert, addUser, deleteUser, listUsers, err := certificate.AdaptFunc(internalMonitor, namespace, componentLabels, desiredKind.Spec.ClusterDns, isFeatureDatabase)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil, false, err
 	}
 	addRoot, err := addUser("root")
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil, false, err
 	}
 	destroyRoot, err := deleteUser("root")
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil, false, err
 	}
 
 	queryRBAC, destroyRBAC, err := rbac.AdaptFunc(internalMonitor, namespace, labels.MustForName(componentLabels, serviceAccountName))
@@ -126,7 +136,7 @@ func AdaptFunc(
 		desiredKind.Spec.Resources,
 	)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil, false, err
 	}
 
 	queryS, destroyS, err := services.AdaptFunc(
@@ -147,12 +157,12 @@ func AdaptFunc(
 
 	queryPDB, err := pdb.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, pdbName), cockroachSelector, "1")
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil, false, err
 	}
 
 	destroyPDB, err := pdb.AdaptFuncToDestroy(namespace, pdbName)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, nil, false, err
 	}
 
 	currentDB := &Current{
@@ -203,7 +213,7 @@ func AdaptFunc(
 	for backupName, desiredBackup := range desiredKind.Spec.Backups {
 		currentBackup := &tree.Tree{}
 		if timestamp == "" || !oneBackup || (timestamp != "" && strings.HasPrefix(timestamp, backupName)) {
-			queryB, destroyB, secrets, err := backups.GetQueryAndDestroyFuncs(
+			queryB, destroyB, secrets, existing, migrateB, err := backups.GetQueryAndDestroyFuncs(
 				internalMonitor,
 				desiredBackup,
 				currentBackup,
@@ -218,10 +228,12 @@ func AdaptFunc(
 				features,
 			)
 			if err != nil {
-				return nil, nil, nil, err
+				return nil, nil, nil, nil, false, err
 			}
 
-			secret.AppendSecrets(backupName, allSecrets, secrets)
+			migrate = migrate || migrateB
+
+			secret.AppendSecrets(backupName, allSecrets, secrets, allExisting, existing)
 			destroyers = append(destroyers, destroyB)
 			queriers = append(queriers, queryB)
 		}
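The loop above now threads each backup's read-only existing references through the same aggregation as its writable secrets, and ORs the per-backup migrate flags together. A rough sketch of what an AppendSecrets-style merge does, assuming (not verified against the orbos implementation) that entries are namespaced by backup name:

```go
package main

import "fmt"

// appendPrefixed is a hypothetical stand-in for secret.AppendSecrets: it
// merges from into into, prefixing keys with the backup name so entries
// from different backups cannot collide.
func appendPrefixed[V any](backupName string, into, from map[string]V) {
	for k, v := range from {
		into[backupName+"."+k] = v
	}
}

func main() {
	all := map[string]string{}
	appendPrefixed("bucket", all, map[string]string{"serviceaccountjson": "..."})
	fmt.Println(all) // map[bucket.serviceaccountjson:...]
}
```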
@@ -251,6 +263,8 @@ func AdaptFunc(
 		},
 		operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
 		allSecrets,
+		allExisting,
+		migrate,
 		nil
 	}
 }
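Almost every early return in this adapter grew from four to six values, which is why so many return nil, nil, nil, err sites change above. As a design aside with purely illustrative names, Go's named results would keep such early returns stable as a signature grows:

```go
package main

import (
	"errors"
	"fmt"
)

type queryFunc func() error

// adapt mirrors the shape of the AdaptFunc closures above, but with named
// results, so an early return need not enumerate every zero value. All
// identifiers here are illustrative, not taken from the zitadel codebase.
func adapt(fail bool) (query queryFunc, secrets map[string]string, migrate bool, err error) {
	if fail {
		err = errors.New("parsing desired state failed")
		return // query, secrets and migrate keep their zero values
	}
	query = func() error { return nil }
	secrets = make(map[string]string)
	return
}

func main() {
	if _, _, _, err := adapt(true); err != nil {
		fmt.Println(err)
	}
}
```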
@@ -1,6 +1,9 @@
 package managed
 
 import (
+	"testing"
+	"time"
+
 	"github.com/caos/orbos/mntr"
 	kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
 	"github.com/caos/orbos/pkg/labels"
@@ -13,8 +16,6 @@ import (
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
 	corev1 "k8s.io/api/core/v1"
-	"testing"
-	"time"
 )
 
 func getTreeWithDBAndBackup(t *testing.T, masterkey string, saJson string, backupName string) *tree.Tree {
@@ -81,9 +82,9 @@ func TestManaged_AdaptBucketBackup(t *testing.T) {
 
 	features := []string{backup.Normal}
 	bucket.SetBackup(k8sClient, namespace, labels, saJson)
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
 
-	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
 	assert.NoError(t, err)
 
 	databases := []string{"test1", "test2"}
@@ -119,11 +120,11 @@ func TestManaged_AdaptBucketInstantBackup(t *testing.T) {
 
 	features := []string{backup.Instant}
 	bucket.SetInstantBackup(k8sClient, namespace, backupName, labels, saJson)
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
 
 	desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
 
-	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
 	assert.NoError(t, err)
 
 	databases := []string{"test1", "test2"}
@@ -159,12 +160,12 @@ func TestManaged_AdaptBucketCleanAndRestore(t *testing.T) {
 
 	features := []string{restore.Instant, clean.Instant}
 	bucket.SetRestore(k8sClient, namespace, backupName, labels, saJson)
-	bucket.SetClean(k8sClient, namespace, backupName)
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).Times(2)
+	bucket.SetClean(k8sClient, namespace, backupName, labels, saJson)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(2)
 
 	desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
 
-	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
 	assert.NoError(t, err)
 
 	databases := []string{"test1", "test2"}
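The test updates shadow the production change because gomock expectations match arguments exactly: an expectation recorded with time.Duration(60) will not match a call made with 60*time.Second. A hand-rolled stand-in (not the gomock API) showing why the two literals compare unequal:

```go
package main

import (
	"fmt"
	"time"
)

// fakeClient records the durations it was called with, standing in for a
// mock whose expectations assert on exact argument values.
type fakeClient struct{ calls []time.Duration }

func (f *fakeClient) WaitUntilReady(timeout time.Duration) {
	f.calls = append(f.calls, timeout)
}

func main() {
	c := &fakeClient{}
	c.WaitUntilReady(60 * time.Second)
	// An expectation pinned to the old literal no longer matches:
	fmt.Println(c.calls[0] == time.Duration(60)) // false
	fmt.Println(c.calls[0] == 60*time.Second)    // true
}
```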
@@ -1,10 +1,11 @@
 package managed
 
 import (
-	"gopkg.in/yaml.v3"
 	"testing"
 	"time"
 
+	"gopkg.in/yaml.v3"
+
 	"github.com/caos/orbos/mntr"
 	kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
 	"github.com/caos/orbos/pkg/labels"
@@ -114,11 +115,11 @@ func TestManaged_Adapt1(t *testing.T) {
 	//statefulset
 	k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
 	//running for setup
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
 	//not ready for setup
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
 	//ready after setup
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
 	//client
 	k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
 	dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@@ -132,7 +133,7 @@ func TestManaged_Adapt1(t *testing.T) {
 	dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
 	k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)
 
-	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
 	assert.NoError(t, err)
 
 	ensure, err := query(k8sClient, queried)
@@ -226,11 +227,11 @@ func TestManaged_Adapt2(t *testing.T) {
 	//statefulset
 	k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
 	//running for setup
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
 	//not ready for setup
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
 	//ready after setup
-	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
+	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
 	//client
 	k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
 	dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@@ -244,7 +245,7 @@ func TestManaged_Adapt2(t *testing.T) {
 	dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
 	k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)
 
-	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
+	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
 	assert.NoError(t, err)
 
 	ensure, err := query(k8sClient, queried)
@@ -2,12 +2,14 @@ package statefulset
 
 import (
 	"fmt"
+	"sort"
+	"strings"
+	"time"
+
 	"github.com/caos/orbos/pkg/labels"
 	"github.com/caos/zitadel/operator"
 	"github.com/caos/zitadel/operator/helpers"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	"sort"
-	"strings"
 
 	"github.com/caos/orbos/mntr"
 	"github.com/caos/orbos/pkg/kubernetes"
@@ -216,7 +218,7 @@ func AdaptFunc(
 	wrapedQuery, wrapedDestroy, err := resources.WrapFuncs(internalMonitor, query, destroy)
 	checkDBRunning := func(k8sClient kubernetes.ClientInt) error {
 		internalMonitor.Info("waiting for statefulset to be running")
-		if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60); err != nil {
+		if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60*time.Second); err != nil {
 			internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be running"))
 			return err
 		}
@@ -226,7 +228,7 @@ func AdaptFunc(
 
 	checkDBNotReady := func(k8sClient kubernetes.ClientInt) error {
 		internalMonitor.Info("checking for statefulset to not be ready")
-		if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1); err != nil {
+		if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1*time.Second); err != nil {
 			internalMonitor.Info("statefulset is not ready")
 			return nil
 		}
@@ -253,7 +255,7 @@ func AdaptFunc(
 
 	checkDBReady := func(k8sClient kubernetes.ClientInt) error {
 		internalMonitor.Info("waiting for statefulset to be ready")
-		if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60); err != nil {
+		if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60*time.Second); err != nil {
 			internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be ready"))
 			return err
 		}
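Note the inverted probe in checkDBNotReady above: the deliberately short 1-second wait is expected to fail while the database is still coming up, and that failure is treated as success. A compact sketch of the pattern with a hypothetical wait function standing in for the kubernetes client method:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// checkNotReady inverts a readiness wait, as checkDBNotReady does above:
// a short timeout that fails confirms the statefulset is still down.
func checkNotReady(wait func(time.Duration) error) error {
	if err := wait(1 * time.Second); err != nil {
		return nil // not ready, which is the expected observation
	}
	return errors.New("statefulset is ready, expected it not to be")
}

func main() {
	alwaysDown := func(time.Duration) error { return errors.New("timeout") }
	fmt.Println(checkNotReady(alwaysDown)) // <nil>
}
```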
@@ -17,6 +17,8 @@ func AdaptFunc() func(
 	operator.QueryFunc,
 	operator.DestroyFunc,
 	map[string]*secret.Secret,
+	map[string]*secret.Existing,
+	bool,
 	error,
 ) {
 	return func(
@@ -27,11 +29,13 @@ func AdaptFunc() func(
 	operator.QueryFunc,
 	operator.DestroyFunc,
 	map[string]*secret.Secret,
+	map[string]*secret.Existing,
+	bool,
 	error,
 ) {
 	desiredKind, err := parseDesiredV0(desired)
 	if err != nil {
-		return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
+		return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
 	}
 	desired.Parsed = desiredKind
 
@@ -53,7 +57,9 @@ func AdaptFunc() func(
 		}, func(k8sClient kubernetes.ClientInt) error {
 			return nil
 		},
-		map[string]*secret.Secret{},
+		make(map[string]*secret.Secret),
+		make(map[string]*secret.Existing),
+		false,
 		nil
 	}
 }
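The provided-database adapter keeps the uniform six-value contract by handing back no-op functions, empty but non-nil maps, and migrate=false. A sketch of that pattern with hypothetical stand-in types (the real signatures live in zitadel/operator):

```go
package main

import "fmt"

// queryFunc and destroyFunc stand in for operator.QueryFunc and
// operator.DestroyFunc.
type (
	queryFunc   func() error
	destroyFunc func() error
)

// adaptProvided returns do-nothing implementations plus empty maps, so
// callers can range over the secrets and OR the migrate flag without
// special-casing externally provided databases.
func adaptProvided() (queryFunc, destroyFunc, map[string]string, map[string]string, bool, error) {
	noop := func() error { return nil }
	return noop, noop, make(map[string]string), make(map[string]string), false, nil
}

func main() {
	_, _, secrets, existing, migrate, err := adaptProvided()
	fmt.Println(len(secrets), len(existing), migrate, err == nil) // 0 0 false true
}
```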