package backup

import (
	"testing"

	"github.com/caos/orbos/mntr"
	"github.com/caos/orbos/pkg/kubernetes"
	kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
	"github.com/caos/orbos/pkg/labels"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"
	corev1 "k8s.io/api/core/v1"
	macherrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestBackup_AdaptInstantBackup1(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	features := []string{Instant}
	monitor := mntr.Monitor{}
	namespace := "testNs"

	databases := []string{"testDb"}
	bucketName := "testBucket"
	cron := "testCron"
	timestamp := "test"
	nodeselector := map[string]string{"test": "test"}
	tolerations := []corev1.Toleration{
		{Key: "testKey", Operator: "testOp"},
	}
	backupName := "testName"
	version := "testVersion"
	secretKey := "testKey"
	secretName := "testSecretName"
	jobName := GetJobName(backupName)
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
	nameLabels := labels.MustForName(componentLabels, jobName)

	checkDBReady := func(k8sClient kubernetes.ClientInt) error {
		return nil
	}

	jobDef := getJob(
		namespace,
		nameLabels,
		getJobSpecDef(
			nodeselector,
			tolerations,
			secretName,
			secretKey,
			backupName,
			version,
			getBackupCommand(
				timestamp,
				databases,
				bucketName,
				backupName,
			),
		),
	)

	// The instant backup is created as a one-off Job: it is applied exactly once,
	// and the subsequent lookup reports that it does not exist (NotFound).
	client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
	client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))

	// AdaptFunc returns a query function whose result is an ensure step;
	// both must succeed against the mocked client.
	query, _, err := AdaptFunc(
		monitor,
		backupName,
		namespace,
		componentLabels,
		databases,
		checkDBReady,
		bucketName,
		cron,
		secretName,
		secretKey,
		timestamp,
		nodeselector,
		tolerations,
		features,
		version,
	)

	assert.NoError(t, err)
	queried := map[string]interface{}{}
	ensure, err := query(client, queried)
	assert.NoError(t, err)
	assert.NoError(t, ensure(client))
}

func TestBackup_AdaptInstantBackup2(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	features := []string{Instant}
	monitor := mntr.Monitor{}
	namespace := "testNs2"
	databases := []string{"testDb2"}
	bucketName := "testBucket2"
	cron := "testCron2"
	timestamp := "test2"
	nodeselector := map[string]string{"test2": "test2"}
	tolerations := []corev1.Toleration{
		{Key: "testKey2", Operator: "testOp2"},
	}
	backupName := "testName2"
	version := "testVersion2"
	secretKey := "testKey2"
	secretName := "testSecretName2"
	jobName := GetJobName(backupName)
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
	nameLabels := labels.MustForName(componentLabels, jobName)

	checkDBReady := func(k8sClient kubernetes.ClientInt) error {
		return nil
	}

	jobDef := getJob(
		namespace,
		nameLabels,
		getJobSpecDef(
			nodeselector,
			tolerations,
			secretName,
			secretKey,
			backupName,
			version,
			getBackupCommand(
				timestamp,
				databases,
				bucketName,
				backupName,
			),
		),
	)

	client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
	client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))

	query, _, err := AdaptFunc(
		monitor,
		backupName,
		namespace,
		componentLabels,
		databases,
		checkDBReady,
		bucketName,
		cron,
		secretName,
		secretKey,
		timestamp,
		nodeselector,
		tolerations,
		features,
		version,
	)

	assert.NoError(t, err)
	queried := map[string]interface{}{}
	ensure, err := query(client, queried)
	assert.NoError(t, err)
	assert.NoError(t, ensure(client))
}

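// The two instant-backup tests above share the same pair of mock expectations.
// A small helper like the following could bundle them. This is a sketch only:
// expectInstantBackupApplied is not part of the package, and it assumes the
// gomock-generated *kubernetesmock.MockClientInt returned by NewMockClientInt.
// In the tests above it would be called as
// expectInstantBackupApplied(client, jobDef, jobDef.Namespace, jobDef.Name, jobName).
func expectInstantBackupApplied(client *kubernetesmock.MockClientInt, jobDef interface{}, namespace, name, jobName string) {
	// The Job definition is applied exactly once ...
	client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
	// ... and the follow-up lookup reports that the Job does not exist.
	client.EXPECT().GetJob(namespace, name).Times(1).
		Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
}
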
func TestBackup_AdaptBackup1(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	features := []string{Normal}
	monitor := mntr.Monitor{}
	namespace := "testNs"
	databases := []string{"testDb"}
	bucketName := "testBucket"
	cron := "testCron"
	timestamp := "test"
	nodeselector := map[string]string{"test": "test"}
	tolerations := []corev1.Toleration{
		{Key: "testKey", Operator: "testOp"},
	}
	backupName := "testName"
	version := "testVersion"
	secretKey := "testKey"
	secretName := "testSecretName"
	jobName := GetJobName(backupName)
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
	nameLabels := labels.MustForName(componentLabels, jobName)

	checkDBReady := func(k8sClient kubernetes.ClientInt) error {
		return nil
	}

	jobDef := getCronJob(
		namespace,
		nameLabels,
		cron,
		getJobSpecDef(
			nodeselector,
			tolerations,
			secretName,
			secretKey,
			backupName,
			version,
			getBackupCommand(
				timestamp,
				databases,
				bucketName,
				backupName,
			),
		),
	)

	// The scheduled backup is created as a CronJob and applied exactly once.
	client.EXPECT().ApplyCronJob(jobDef).Times(1).Return(nil)

	query, _, err := AdaptFunc(
		monitor,
		backupName,
		namespace,
		componentLabels,
		databases,
		checkDBReady,
		bucketName,
		cron,
		secretName,
		secretKey,
		timestamp,
		nodeselector,
		tolerations,
		features,
		version,
	)

	assert.NoError(t, err)
	queried := map[string]interface{}{}
	ensure, err := query(client, queried)
	assert.NoError(t, err)
	assert.NoError(t, ensure(client))
}

func TestBackup_AdaptBackup2(t *testing.T) {
	client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

	features := []string{Normal}
	monitor := mntr.Monitor{}
	namespace := "testNs2"
	databases := []string{"testDb2"}
	bucketName := "testBucket2"
	cron := "testCron2"
	timestamp := "test2"
	nodeselector := map[string]string{"test2": "test2"}
	tolerations := []corev1.Toleration{
		{Key: "testKey2", Operator: "testOp2"},
	}
	backupName := "testName2"
	version := "testVersion2"
	secretKey := "testKey2"
	secretName := "testSecretName2"
	jobName := GetJobName(backupName)
	componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
	nameLabels := labels.MustForName(componentLabels, jobName)

	checkDBReady := func(k8sClient kubernetes.ClientInt) error {
		return nil
	}

	jobDef := getCronJob(
		namespace,
		nameLabels,
		cron,
		getJobSpecDef(
			nodeselector,
			tolerations,
			secretName,
			secretKey,
			backupName,
			version,
			getBackupCommand(
				timestamp,
				databases,
				bucketName,
				backupName,
			),
		),
	)

	client.EXPECT().ApplyCronJob(jobDef).Times(1).Return(nil)

	query, _, err := AdaptFunc(
		monitor,
		backupName,
		namespace,
		componentLabels,
		databases,
		checkDBReady,
		bucketName,
		cron,
		secretName,
		secretKey,
		timestamp,
		nodeselector,
		tolerations,
		features,
		version,
	)

	assert.NoError(t, err)
	queried := map[string]interface{}{}
	ensure, err := query(client, queried)
	assert.NoError(t, err)
	assert.NoError(t, ensure(client))
}

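// A possible consolidation of the four tests above into a single table-driven
// test. This is a sketch, not part of the original file: the test name and the
// case table are hypothetical, while every helper used here (AdaptFunc, getJob,
// getCronJob, getJobSpecDef, getBackupCommand, GetJobName) and all fixture
// values are taken from the tests above.
func TestBackup_AdaptTableDriven_Sketch(t *testing.T) {
	cases := []struct {
		name    string
		feature string
		suffix  string
	}{
		{"instant-1", Instant, ""},
		{"instant-2", Instant, "2"},
		{"cron-1", Normal, ""},
		{"cron-2", Normal, "2"},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			client := kubernetesmock.NewMockClientInt(gomock.NewController(t))

			monitor := mntr.Monitor{}
			namespace := "testNs" + c.suffix
			databases := []string{"testDb" + c.suffix}
			bucketName := "testBucket" + c.suffix
			cron := "testCron" + c.suffix
			timestamp := "test" + c.suffix
			nodeselector := map[string]string{"test" + c.suffix: "test" + c.suffix}
			tolerations := []corev1.Toleration{{
				Key:      "testKey" + c.suffix,
				Operator: corev1.TolerationOperator("testOp" + c.suffix),
			}}
			backupName := "testName" + c.suffix
			version := "testVersion" + c.suffix
			secretKey := "testKey" + c.suffix
			secretName := "testSecretName" + c.suffix
			jobName := GetJobName(backupName)
			componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd2", "testOp2", "testVersion2"), "testKind2", "testVersion2"), "testComponent")
			nameLabels := labels.MustForName(componentLabels, jobName)
			checkDBReady := func(k8sClient kubernetes.ClientInt) error {
				return nil
			}

			jobSpec := getJobSpecDef(
				nodeselector,
				tolerations,
				secretName,
				secretKey,
				backupName,
				version,
				getBackupCommand(timestamp, databases, bucketName, backupName),
			)

			// Instant backups are applied as a one-off Job, scheduled backups as a CronJob.
			if c.feature == Instant {
				jobDef := getJob(namespace, nameLabels, jobSpec)
				client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
				client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).
					Return(nil, macherrs.NewNotFound(schema.GroupResource{Group: "batch", Resource: "jobs"}, jobName))
			} else {
				client.EXPECT().ApplyCronJob(getCronJob(namespace, nameLabels, cron, jobSpec)).Times(1).Return(nil)
			}

			query, _, err := AdaptFunc(
				monitor,
				backupName,
				namespace,
				componentLabels,
				databases,
				checkDBReady,
				bucketName,
				cron,
				secretName,
				secretKey,
				timestamp,
				nodeselector,
				tolerations,
				[]string{c.feature},
				version,
			)
			assert.NoError(t, err)

			ensure, err := query(client, map[string]interface{}{})
			assert.NoError(t, err)
			assert.NoError(t, ensure(client))
		})
	}
}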