Mirror of https://github.com/zitadel/zitadel.git (synced 2024-12-15 04:18:01 +00:00)
bacfc3b099
feat(crd): add crd mode for operators (#1329)

* feat(operator): add base for zitadel operator
* fix(operator): changed pipeline to release operator
* fix(operator): fmt with only one parameter
* fix(operator): corrected workflow job name
* fix(zitadelctl): added restore and backuplist command
* fix(zitadelctl): scale for restore
* chore(container): use scratch for deploy container
* fix(zitadelctl): limit image to scratch
* fix(migration): added migration scripts for newer version
* fix(operator): changed handling of kubeconfig in operator logic
* fix(operator): changed handling of secrets in operator logic
* fix(operator): use new version of zitadel
* fix(operator): added path for migrations
* fix(operator): delete doublets of migration scripts
* fix(operator): delete subpaths and integrate logic into init container
* fix(operator): corrected path in dockerfile for local migrations
* fix(operator): added migrations for cockroachdb-secure
* fix(operator): delete logic for ambassador module
* fix(operator): added read and write secret commands
* fix(operator): correct and align operator pipeline with zitadel pipeline
* fix(operator): correct yaml error in operator pipeline
* fix(operator): correct action name in operator pipeline
* fix(operator): correct case-sensitive filename in operator pipeline
* fix(operator): upload artifacts from buildx output
* fix(operator): corrected attribute spelling error
* fix(operator): combined jobs for operator binary and image
* fix(operator): added missing comma in operator pipeline
* fix(operator): added codecov for operator image
* fix(operator): added codecov for operator image
* fix(testing): code changes for testing and several unit-tests (#1009)
* fix(operator): usage of interface of kubernetes client for testing and several unit-tests
* fix(operator): several unit-tests
* fix(operator): several unit-tests
* fix(operator): changed order for the operator logic
* fix(operator): added version of zitadelctl from semantic release
* fix(operator): corrected function call with version of zitadelctl
* fix(operator): corrected function call with version of zitadelctl
* fix(operator): add check output to operator release pipeline
* fix(operator): set --short length everywhere to 12
* fix(operator): zitadel setup in job instead of exec with several unit tests
* fix(operator): fixes to combine newest zitadel and testing branch
* fix(operator): corrected path in Dockerfile
* fix(operator): fixed unit-test that was ignored during changes
* fix(operator): fixed unit-test that was ignored during changes
* fix(operator): corrected Dockerfile to correctly use env variable
* fix(operator): quickfix takeoff deployment
* fix(operator): corrected the clusterrolename in the applied artifacts
* fix: update secure migrations
* fix(operator): migrations (#1057)
* fix(operator): copied migrations from orbos repository
* fix(operator): newest migrations
* chore: use cockroach-secure
* fix: rename migration
* fix: remove insecure cockroach migrations

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: finalize labels
* fix(operator): cli logging concurrent and fixe deployment of operator during restore
* fix: finalize labels and cli commands
* fix: restore
* chore: cockroachdb is always secure
* chore: use orbos consistent-labels latest commit
* test: make tests compatible with new labels
* fix: default to sa token for start command
* fix: use cockroachdb v12.02
* fix: don't delete flyway user
* test: fix migration test
* fix: use correct table qualifiers
* fix: don't alter sequence ownership
* fix: upgrade flyway
* fix: change ownership of all dbs and tables to admin user
* fix: change defaultdb user
* fix: treat clientid status codes >= 400 as errors
* fix: reconcile specified ZITADEL version, not binary version
* fix: add ca-certs
* fix: use latest orbos code
* fix: use orbos with fixed race condition
* fix: use latest ORBOS code
* fix: use latest ORBOS code
* fix: make migration and scaling around restoring work
* fix(operator): move zitadel operator
* chore(migrations): include owner change migration
* feat(db): add code base for database operator
* fix(db): change used image registry for database operator
* fix(db): generated mock
* fix(db): add accidentally ignored file
* fix(db): add cockroachdb backup image to pipeline
* fix(db): correct pipeline and image versions
* fix(db): correct version of used orbos
* fix(db): correct database import
* fix(db): go mod tidy
* fix(db): use new version for orbos
* fix(migrations): include migrations into zitadelctl binary (#1211)
* fix(db): use statik to integrate migrations into binary
* fix(migrations): corrections unit tests and pipeline for integrated migrations into zitadelctl binary
* fix(migrations): correction in dockerfile for pipeline build
* fix(migrations): correction in dockerfile for pipeline build
* fix(migrations): dockerfile changes for cache optimization
* fix(database): correct used part-of label in database operator
* fix(database): correct used selectable label in zitadel operator
* fix(operator): correct lables for user secrets in zitadel operator
* fix(operator): correct lables for service test in zitadel operator
* fix: don't enable database features for user operations (#1227)
* fix: don't enable database features for user operations
* fix: omit database feature for connection info adapter
* fix: use latest orbos version
* fix(crd): corrected logic to get database connection and other info
* fix(crd): corrected yaml tags and start for zitadel operator
* fix(crd): move some dependencies and use consistent structure
* fix(crd): corrected unit-tests
* fix(crd): corrected main files for debug starts
* chore(pipeline): use correct version for zitadelctl build
* fix(crd): correct calculating of current db state for zitadel operator
* fix(crd): use binary version for deployment of crd mode operators
* fix(crd): add gitops attribute for reconciling
* fix(crd): corrected crd with newest version
* fix(migration): collect cleanup functions and only use them if all jobs are successful
* fix(zitadelctl): import gcp auth to connect to gke cluster
* feat: Add read and writesecret options for crd mode (#1435)
* fix: don't require orbconfig for crd mode
* test: pass
* fix(zitadelctl): import gcp auth to connect to gke cluster
* feat: add read and writesecret option for crd mode
* test: fix
* fix: make all crd secrets writable
* fix: use in-cluster configs for in-cluster operators
* chore: remove unnecessary debug files

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: Crdoperatormerge review (#1385)
* fix: don't require orbconfig for crd mode
* test: pass
* fix(zitadelctl): import gcp auth to connect to gke cluster
* fix: ensure caos-system namespace
* fix: apply orbconfig at takeoff
* docs: improve help for creating an orbconfig
* docs: describe orbconfig properties
* docs: add --gitops to help message example
* fix(pipeline): correct upload of artifacts in dev releases
* test: pass

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix(test): corrected falsely merged tests
* chore: update orbos library
* fix: only handle exactly named and namespaced crd resource
* fix: print errors, check correct crd namespace
* fix: validate bucket secret
* chore: compile
* fix(operator): corrected secret handling when unused secrets are not defined
* fix(operator): corrected handling of jobs
* fix: dont print logs when readsecret path is provided
* fix(operator): corrected handling of jobs and sort for mounted volumes
* fix(operator): sort for volumes
* fix(operator): change orboos import to newest release

Co-authored-by: Florian Forster <florian@caos.ch>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
(cherry picked from commit fa9bd5a8e7)

* fix(operator): Standard timeout handling (#1458)
* fix: always use standard time.Duration
* fix: give backup and restore more time
* fix: give backup and restore jobs more time
(cherry picked from commit 7468b7d1e8)

* fix go mod

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
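The "Standard timeout handling" entry above ("always use standard time.Duration") is the convention the test file below relies on when it expects WaitUntilStatefulsetIsReady(..., 60*time.Second) on the mocked client. A minimal sketch of that convention; waitUntil is a purely hypothetical helper, not a function from this repository:

// Timeouts travel as time.Duration values end to end, so call sites read
// 60*time.Second rather than a unitless 60.
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntil polls check until it succeeds or the timeout elapses.
func waitUntil(timeout time.Duration, check func() bool) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if check() {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return errors.New("timed out after " + timeout.String())
}

func main() {
	fmt.Println(waitUntil(60*time.Second, func() bool { return true }))
}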
179 lines
6.3 KiB
Go
package managed

import (
	"testing"
	"time"

	"github.com/caos/orbos/mntr"
	kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
	"github.com/caos/orbos/pkg/labels"
	"github.com/caos/orbos/pkg/secret"
	"github.com/caos/orbos/pkg/tree"
	"github.com/caos/zitadel/operator/database/kinds/backups/bucket"
	"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
	"github.com/caos/zitadel/operator/database/kinds/backups/bucket/clean"
	"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
)
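// getDesiredTree and the DesiredV0/Spec types used below are defined elsewhere
// in the managed package; only the tests live in this file. As a hedged sketch
// (not the repository's actual helper), getDesiredTree can be thought of as
// round-tripping the typed desired kind through YAML into a generic
// *tree.Tree, roughly:
//
//	func getDesiredTree(t *testing.T, masterkey string, desired interface{}) *tree.Tree {
//		// the real helper presumably also wires the masterkey into the
//		// secret package so encrypted values can be (de)serialized
//		data, err := yaml.Marshal(desired)
//		assert.NoError(t, err)
//
//		result := &tree.Tree{}
//		assert.NoError(t, yaml.Unmarshal(data, result))
//		return result
//	}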
func getTreeWithDBAndBackup(t *testing.T, masterkey string, saJson string, backupName string) *tree.Tree {

	bucketDesired := getDesiredTree(t, masterkey, &bucket.DesiredV0{
		Common: &tree.Common{
			Kind:    "databases.caos.ch/BucketBackup",
			Version: "v0",
		},
		Spec: &bucket.Spec{
			Verbose: true,
			Cron:    "testCron",
			Bucket:  "testBucket",
			ServiceAccountJSON: &secret.Secret{
				Value: saJson,
			},
		},
	})
	bucketDesiredKind, err := bucket.ParseDesiredV0(bucketDesired)
	assert.NoError(t, err)
	bucketDesired.Parsed = bucketDesiredKind

	return getDesiredTree(t, masterkey, &DesiredV0{
		Common: &tree.Common{
			Kind:    "databases.caos.ch/CockroachDB",
			Version: "v0",
		},
		Spec: Spec{
			Verbose:         false,
			ReplicaCount:    1,
			StorageCapacity: "368Gi",
			StorageClass:    "testSC",
			NodeSelector:    map[string]string{},
			ClusterDns:      "testDns",
			Backups:         map[string]*tree.Tree{backupName: bucketDesired},
		},
	})
}
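// For orientation: the helper above nests a BucketBackup desired tree inside
// the CockroachDB desired tree under Spec.Backups, keyed by backupName. That
// key ("testBucket" in these tests) is what the backup, restore, and clean
// features below address, roughly:
//
//	databases.caos.ch/CockroachDB v0
//	└── spec.backups["testBucket"]
//	    └── databases.caos.ch/BucketBackup v0 (cron, bucket, serviceAccountJSON)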
func TestManaged_AdaptBucketBackup(t *testing.T) {
	monitor := mntr.Monitor{}
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")

	labels := map[string]string{
		"app.kubernetes.io/component":  "backup",
		"app.kubernetes.io/managed-by": "testOp",
		"app.kubernetes.io/name":       "backup-serviceaccountjson",
		"app.kubernetes.io/part-of":    "testProd",
		"app.kubernetes.io/version":    "testVersion",
		"caos.ch/apiversion":           "v0",
		"caos.ch/kind":                 "BucketBackup",
	}
	namespace := "testNs"
	timestamp := "testTs"
	nodeselector := map[string]string{"test": "test"}
	tolerations := []corev1.Toleration{}
	version := "testVersion"
	k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
	backupName := "testBucket"
	saJson := "testSA"
	masterkey := "testMk"

	desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)

	features := []string{backup.Normal}
	bucket.SetBackup(k8sClient, namespace, labels, saJson)
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)

	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
	assert.NoError(t, err)

	databases := []string{"test1", "test2"}
	queried := bucket.SetQueriedForDatabases(databases)
	ensure, err := query(k8sClient, queried)
	assert.NoError(t, err)
	assert.NotNil(t, ensure)

	assert.NoError(t, ensure(k8sClient))
}
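// bucket.SetBackup above (and SetInstantBackup, SetRestore, SetClean in the
// tests that follow) are helpers exported by the bucket package's tests. Since
// k8sClient is a gomock mock, they presumably do nothing but register the
// expectations for the Kubernetes resources the given feature applies (for
// example the backup service-account secret and the backup/restore jobs);
// the exact expected calls are defined in that package, not here.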
func TestManaged_AdaptBucketInstantBackup(t *testing.T) {
	monitor := mntr.Monitor{}
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")
	labels := map[string]string{
		"app.kubernetes.io/component":  "backup",
		"app.kubernetes.io/managed-by": "testOp",
		"app.kubernetes.io/name":       "backup-serviceaccountjson",
		"app.kubernetes.io/part-of":    "testProd",
		"app.kubernetes.io/version":    "testVersion",
		"caos.ch/apiversion":           "v0",
		"caos.ch/kind":                 "BucketBackup",
	}
	namespace := "testNs"
	timestamp := "testTs"
	nodeselector := map[string]string{"test": "test"}
	tolerations := []corev1.Toleration{}
	version := "testVersion"
	masterkey := "testMk"
	k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
	saJson := "testSA"
	backupName := "testBucket"

	features := []string{backup.Instant}
	bucket.SetInstantBackup(k8sClient, namespace, backupName, labels, saJson)
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)

	desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)

	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
	assert.NoError(t, err)

	databases := []string{"test1", "test2"}
	queried := bucket.SetQueriedForDatabases(databases)
	ensure, err := query(k8sClient, queried)
	assert.NoError(t, err)
	assert.NotNil(t, ensure)

	assert.NoError(t, ensure(k8sClient))
}
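// The clean-and-restore variant below enables two instant features
// (restore.Instant and clean.Instant), which matches the .Times(2) on the
// statefulset-readiness expectation: one readiness wait per feature.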
func TestManaged_AdaptBucketCleanAndRestore(t *testing.T) {
	monitor := mntr.Monitor{}
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "testKind", "v0"), "database")
	labels := map[string]string{
		"app.kubernetes.io/component":  "backup",
		"app.kubernetes.io/managed-by": "testOp",
		"app.kubernetes.io/name":       "backup-serviceaccountjson",
		"app.kubernetes.io/part-of":    "testProd",
		"app.kubernetes.io/version":    "testVersion",
		"caos.ch/apiversion":           "v0",
		"caos.ch/kind":                 "BucketBackup",
	}
	namespace := "testNs"
	timestamp := "testTs"
	nodeselector := map[string]string{"test": "test"}
	tolerations := []corev1.Toleration{}
	version := "testVersion"
	masterkey := "testMk"
	k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
	saJson := "testSA"
	backupName := "testBucket"

	features := []string{restore.Instant, clean.Instant}
	bucket.SetRestore(k8sClient, namespace, backupName, labels, saJson)
	bucket.SetClean(k8sClient, namespace, backupName, labels, saJson)
	k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(2)

	desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)

	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
	assert.NoError(t, err)

	databases := []string{"test1", "test2"}
	queried := bucket.SetQueriedForDatabases(databases)
	ensure, err := query(k8sClient, queried)
	assert.NoError(t, err)
	assert.NotNil(t, ensure)

	assert.NoError(t, ensure(k8sClient))
}
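// All three tests exercise the same two-phase pattern: AdaptFunc returns
// (among other values) a query function; calling it with the mocked cluster
// client and the queried-resources map yields an ensure function, and only
// applying that ensure function performs changes against the cluster. A
// condensed, hedged sketch of how a caller drives this (error handling
// elided; the real signatures live in the operator packages):
//
//	query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp,
//		nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
//	ensure, err := query(k8sClient, queried) // compute what has to change
//	err = ensure(k8sClient)                  // apply the desired state
//
// The tests assert exactly this flow, with every Kubernetes interaction
// replaced by gomock expectations on the mocked client.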