// zitadel/operator/database/kinds/databases/managed/adapt_test.go
package managed

import (
	"testing"
	"time"

	"github.com/caos/orbos/mntr"
	kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
	"github.com/caos/orbos/pkg/labels"
	"github.com/caos/orbos/pkg/secret"
	"github.com/caos/orbos/pkg/tree"
	coremock "github.com/caos/zitadel/operator/database/kinds/databases/core/mock"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	"gopkg.in/yaml.v3"
	corev1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)
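
// getDesiredTree sets the package-wide masterkey used for secret handling and
// round-trips the desired struct through YAML into the *tree.Tree form that
// AdaptFunc consumes.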
func getDesiredTree(t *testing.T, masterkey string, desired interface{}) *tree.Tree {
	secret.Masterkey = masterkey
	desiredTree := &tree.Tree{}
	data, err := yaml.Marshal(desired)
	assert.NoError(t, err)
	assert.NoError(t, yaml.Unmarshal(data, desiredTree))
	return desiredTree
}
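
// TestManaged_Adapt1 runs AdaptFunc against fully mocked Kubernetes and
// database interfaces and verifies that querying and ensuring the desired
// CockroachDB tree applies the expected resources.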
func TestManaged_Adapt1(t *testing.T) {
	monitor := mntr.Monitor{}
	nodeLabels := map[string]string{
		"app.kubernetes.io/component":  "database",
		"app.kubernetes.io/managed-by": "testOp",
		"app.kubernetes.io/name":       "cockroachdb.node",
		"app.kubernetes.io/part-of":    "testProd",
		"orbos.ch/selectable":          "yes",
	}
	cockroachLabels := map[string]string{
		"app.kubernetes.io/component":  "database",
		"app.kubernetes.io/managed-by": "testOp",
		"app.kubernetes.io/name":       "cockroachdb-budget",
		"app.kubernetes.io/part-of":    "testProd",
		"app.kubernetes.io/version":    "testVersion",
		"caos.ch/apiversion":           "v0",
		"caos.ch/kind":                 "testKind",
	}
	cockroachSelectorLabels := map[string]string{
		"app.kubernetes.io/component":  "database",
		"app.kubernetes.io/managed-by": "testOp",
		"app.kubernetes.io/name":       "cockroachdb",
		"app.kubernetes.io/part-of":    "testProd",
		"orbos.ch/selectable":          "yes",
	}
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")

	namespace := "testNs"
	timestamp := "testTs"
	nodeselector := map[string]string{"test": "test"}
	tolerations := []corev1.Toleration{}
	version := "testVersion"
	features := []string{"database"}
	masterkey := "testMk"

	k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
	dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))

	queried := map[string]interface{}{}
	desired := getDesiredTree(t, masterkey, &DesiredV0{
		Common: &tree.Common{
			Kind:    "databases.caos.ch/CockroachDB",
			Version: "v0",
		},
		Spec: Spec{
			Verbose:         false,
			ReplicaCount:    1,
			StorageCapacity: "368Gi",
			StorageClass:    "testSC",
			NodeSelector:    map[string]string{},
			ClusterDns:      "testDns",
		},
	})

	unav := intstr.FromInt(1)
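	// a PodDisruptionBudget carrying the full cockroach labels, selecting the
	// pods via the selector labels and allowing at most one unavailable pod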
	k8sClient.EXPECT().ApplyPodDisruptionBudget(&policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "cockroachdb-budget",
			Namespace: namespace,
			Labels:    cockroachLabels,
		},
		Spec: policy.PodDisruptionBudgetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: cockroachSelectorLabels,
			},
			MaxUnavailable: &unav,
		},
	})
	secretList := &corev1.SecretList{
		Items: []corev1.Secret{},
	}
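
	// services and RBAC resources for the cluster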
	k8sClient.EXPECT().ApplyService(gomock.Any()).Times(3)
	k8sClient.EXPECT().ApplyServiceAccount(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyRole(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyClusterRole(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyRoleBinding(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyClusterRoleBinding(gomock.Any()).Times(1)
	// statefulset
	k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).Times(1)
	// running for setup
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60))
	// not ready for setup
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1))
	// ready after setup
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
	// client certificate
	k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
	dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
	dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
	k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
	// node certificates
	k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
	dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
	dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
	dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
	dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
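
	// AdaptFunc returns the query function; running it against the mocked
	// client yields the ensure step, which must also succeed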
	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
	assert.NoError(t, err)
	ensure, err := query(k8sClient, queried)
	assert.NoError(t, err)
	assert.NotNil(t, ensure)
	assert.NoError(t, ensure(k8sClient))
}
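
// TestManaged_Adapt2 repeats the adaptation flow with a second, differently
// named set of operator, API, and component values to verify that the expected
// labels are derived from the inputs rather than hard-coded.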
func TestManaged_Adapt2(t *testing.T) {
	monitor := mntr.Monitor{}
	namespace := "testNs"
	timestamp := "testTs"
	nodeLabels := map[string]string{
		"app.kubernetes.io/component":  "database2",
		"app.kubernetes.io/managed-by": "testOp2",
		"app.kubernetes.io/name":       "cockroachdb.node",
		"app.kubernetes.io/part-of":    "testProd2",
		"orbos.ch/selectable":          "yes",
	}
	cockroachLabels := map[string]string{
		"app.kubernetes.io/component":  "database2",
		"app.kubernetes.io/managed-by": "testOp2",
		"app.kubernetes.io/name":       "cockroachdb-budget",
		"app.kubernetes.io/part-of":    "testProd2",
		"app.kubernetes.io/version":    "testVersion2",
		"caos.ch/apiversion":           "v1",
		"caos.ch/kind":                 "testKind2",
	}
	cockroachSelectorLabels := map[string]string{
		"app.kubernetes.io/component":  "database2",
		"app.kubernetes.io/managed-by": "testOp2",
		"app.kubernetes.io/name":       "cockroachdb",
		"app.kubernetes.io/part-of":    "testProd2",
		"orbos.ch/selectable":          "yes",
	}
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "v1"), "database2")

	nodeselector := map[string]string{"test2": "test2"}
	var tolerations []corev1.Toleration
	version := "testVersion2"
	features := []string{"database"}
	masterkey := "testMk2"

	k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
	dbCurrent := coremock.NewMockDatabaseCurrent(gomock.NewController(t))

	queried := map[string]interface{}{}
	desired := getDesiredTree(t, masterkey, &DesiredV0{
		Common: &tree.Common{
			Kind:    "databases.caos.ch/CockroachDB",
			Version: "v0",
		},
		Spec: Spec{
			Verbose:         false,
			ReplicaCount:    1,
			StorageCapacity: "368Gi",
			StorageClass:    "testSC",
			NodeSelector:    map[string]string{},
			ClusterDns:      "testDns",
		},
	})

	unav := intstr.FromInt(1)
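	// the PodDisruptionBudget must carry the second label set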
	k8sClient.EXPECT().ApplyPodDisruptionBudget(&policy.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "cockroachdb-budget",
			Namespace: namespace,
			Labels:    cockroachLabels,
		},
		Spec: policy.PodDisruptionBudgetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: cockroachSelectorLabels,
			},
			MaxUnavailable: &unav,
		},
	})
	secretList := &corev1.SecretList{
		Items: []corev1.Secret{},
	}
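
	// services and RBAC resources, as in TestManaged_Adapt1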
	k8sClient.EXPECT().ApplyService(gomock.Any()).Times(3)
	k8sClient.EXPECT().ApplyServiceAccount(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyRole(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyClusterRole(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyRoleBinding(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplyClusterRoleBinding(gomock.Any()).Times(1)
	// statefulset
	k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).Times(1)
	// running for setup
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60))
	// not ready for setup
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1))
	// ready after setup
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
	// client certificate
	k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
	dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
	dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
	k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
	// node certificates
	k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).Times(1).Return(secretList, nil)
	dbCurrent.EXPECT().GetCertificate().Times(1).Return(nil)
	dbCurrent.EXPECT().GetCertificateKey().Times(1).Return(nil)
	dbCurrent.EXPECT().SetCertificate(gomock.Any()).Times(1)
	dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).Times(1)
	k8sClient.EXPECT().ApplySecret(gomock.Any()).Times(1)
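
	// run query and ensure against the mocked client, as in TestManaged_Adapt1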
	query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
	assert.NoError(t, err)
	ensure, err := query(k8sClient, queried)
	assert.NoError(t, err)
	assert.NotNil(t, ensure)
	assert.NoError(t, ensure(k8sClient))
}