fix: operator picks (#1463)

* feat(crd): add crd mode for operators (#1329)

* feat(operator): add base for zitadel operator

* fix(operator): changed pipeline to release operator

* fix(operator): fmt with only one parameter

* fix(operator): corrected workflow job name

* fix(zitadelctl): added restore and backuplist command

* fix(zitadelctl): scale for restore

* chore(container): use scratch for deploy container

* fix(zitadelctl): limit image to scratch

* fix(migration): added migration scripts for newer version

* fix(operator): changed handling of kubeconfig in operator logic

* fix(operator): changed handling of secrets in operator logic

* fix(operator): use new version of zitadel

* fix(operator): added path for migrations

* fix(operator): delete duplicate migration scripts

* fix(operator): delete subpaths and integrate logic into init container

* fix(operator): corrected path in dockerfile for local migrations

* fix(operator): added migrations for cockroachdb-secure

* fix(operator): delete logic for ambassador module

* fix(operator): added read and write secret commands

* fix(operator): correct and align operator pipeline with zitadel pipeline

* fix(operator): correct yaml error in operator pipeline

* fix(operator): correct action name in operator pipeline

* fix(operator): correct case-sensitive filename in operator pipeline

* fix(operator): upload artifacts from buildx output

* fix(operator): corrected attribute spelling error

* fix(operator): combined jobs for operator binary and image

* fix(operator): added missing comma in operator pipeline

* fix(operator): added codecov for operator image

* fix(operator): added codecov for operator image

* fix(testing): code changes for testing and several unit-tests (#1009)

* fix(operator): usage of interface of kubernetes client for testing and several unit-tests

* fix(operator): several unit-tests

* fix(operator): several unit-tests

* fix(operator): changed order for the operator logic

* fix(operator): added version of zitadelctl from semantic release

* fix(operator): corrected function call with version of zitadelctl

* fix(operator): corrected function call with version of zitadelctl

* fix(operator): add check output to operator release pipeline

* fix(operator): set --short length everywhere to 12

* fix(operator): zitadel setup in job instead of exec with several unit tests

* fix(operator): fixes to combine newest zitadel and testing branch

* fix(operator): corrected path in Dockerfile

* fix(operator): fixed unit-test that was ignored during changes

* fix(operator): fixed unit-test that was ignored during changes

* fix(operator): corrected Dockerfile to correctly use env variable

* fix(operator): quickfix takeoff deployment

* fix(operator): corrected the clusterrolename in the applied artifacts

* fix: update secure migrations

* fix(operator): migrations (#1057)

* fix(operator): copied migrations from orbos repository

* fix(operator): newest migrations

* chore: use cockroach-secure

* fix: rename migration

* fix: remove insecure cockroach migrations

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: finalize labels

* fix(operator): concurrent cli logging and fixed deployment of operator during restore

* fix: finalize labels and cli commands

* fix: restore

* chore: cockroachdb is always secure

* chore: use orbos consistent-labels latest commit

* test: make tests compatible with new labels

* fix: default to sa token for start command

* fix: use cockroachdb v12.02

* fix: don't delete flyway user

* test: fix migration test

* fix: use correct table qualifiers

* fix: don't alter sequence ownership

* fix: upgrade flyway

* fix: change ownership of all dbs and tables to admin user

* fix: change defaultdb user

* fix: treat clientid status codes >= 400 as errors

* fix: reconcile specified ZITADEL version, not binary version

* fix: add ca-certs

* fix: use latest orbos code

* fix: use orbos with fixed race condition

* fix: use latest ORBOS code

* fix: use latest ORBOS code

* fix: make migration and scaling around restoring work

* fix(operator): move zitadel operator

* chore(migrations): include owner change migration

* feat(db): add code base for database operator

* fix(db): change used image registry for database operator

* fix(db): generated mock

* fix(db): add accidentally ignored file

* fix(db): add cockroachdb backup image to pipeline

* fix(db): correct pipeline and image versions

* fix(db): correct version of used orbos

* fix(db): correct database import

* fix(db): go mod tidy

* fix(db): use new version for orbos

* fix(migrations): include migrations into zitadelctl binary (#1211)

* fix(db): use statik to integrate migrations into binary

* fix(migrations): corrections unit tests and pipeline for integrated migrations into zitadelctl binary

* fix(migrations): correction in dockerfile for pipeline build

* fix(migrations): correction in dockerfile for pipeline build

* fix(migrations): dockerfile changes for cache optimization

* fix(database): correct used part-of label in database operator

* fix(database): correct used selectable label in zitadel operator

* fix(operator): correct labels for user secrets in zitadel operator

* fix(operator): correct labels for service test in zitadel operator

* fix: don't enable database features for user operations (#1227)

* fix: don't enable database features for user operations

* fix: omit database feature for connection info adapter

* fix: use latest orbos version

* fix(crd): corrected logic to get database connection and other info

* fix(crd): corrected yaml tags and start for zitadel operator

* fix(crd): move some dependencies and use consistent structure

* fix(crd): corrected unit-tests

* fix(crd): corrected main files for debug starts

* chore(pipeline): use correct version for zitadelctl build

* fix(crd): correct calculating of current db state for zitadel operator

* fix(crd): use binary version for deployment of crd mode operators

* fix(crd): add gitops attribute for reconciling

* fix(crd): corrected crd with newest version

* fix(migration): collect cleanup functions and only use them if all jobs are successful

* fix(zitadelctl): import gcp auth to connect to gke cluster

* feat: Add read and writesecret options for crd mode (#1435)

* fix: don't require orbconfig for crd mode

* test: pass

* fix(zitadelctl): import gcp auth to connect to gke cluster

* feat: add read and writesecret option for crd mode

* test: fix

* fix: make all crd secrets writable

* fix: use in-cluster configs for in-cluster operators

* chore: remove unnecessary debug files

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix: Crdoperatormerge review (#1385)

* fix: don't require orbconfig for crd mode

* test: pass

* fix(zitadelctl): import gcp auth to connect to gke cluster

* fix: ensure caos-system namespace

* fix: apply orbconfig at takeoff

* docs: improve help for creating an orbconfig

* docs: describe orbconfig properties

* docs: add --gitops to help message example

* fix(pipeline): correct upload of artifacts in dev releases

* test: pass

Co-authored-by: Stefan Benz <stefan@caos.ch>

* fix(test): corrected falsely merged tests

* chore: update orbos library

* fix: only handle exactly named and namespaced crd resource

* fix: print errors, check correct crd namespace

* fix: validate bucket secret

* chore: compile

* fix(operator): corrected secret handling when unused secrets are not defined

* fix(operator): corrected handling of jobs

* fix: don't print logs when readsecret path is provided

* fix(operator): corrected handling of jobs and sort for mounted volumes

* fix(operator): sort for volumes

* fix(operator): change orbos import to newest release

Co-authored-by: Florian Forster <florian@caos.ch>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>

(cherry picked from commit fa9bd5a8e7)

* fix(operator): Standard timeout handling (#1458)

* fix: always use standard time.Duration

* fix: give backup and restore more time

* fix: give backup and restore jobs more time

(cherry picked from commit 7468b7d1e8)

* fix go mod

Co-authored-by: Stefan Benz <46600784+stebenz@users.noreply.github.com>
Co-authored-by: Elio Bischof <eliobischof@gmail.com>
Author: Livio Amstutz
Date: 2021-03-24 10:31:19 +01:00
Committed by: GitHub
Parent: e7c11cb9e2
Commit: bacfc3b099
105 changed files with 3431 additions and 1324 deletions


@@ -11,7 +11,7 @@ import (
"gopkg.in/yaml.v3"
)
type AdaptFunc func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (QueryFunc, DestroyFunc, map[string]*secret.Secret, error)
type AdaptFunc func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (QueryFunc, DestroyFunc, map[string]*secret.Secret, map[string]*secret.Existing, bool, error)
type EnsureFunc func(k8sClient kubernetes.ClientInt) error
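
For orientation, a minimal sketch of consuming the widened signature (the helper name and wiring are illustrative, not from the repository): besides the query and destroy functions, AdaptFunc now also returns references to pre-existing Kubernetes secrets and a flag signalling whether a migration is needed.

package example

import (
	"github.com/caos/orbos/mntr"
	"github.com/caos/orbos/pkg/secret"
	"github.com/caos/orbos/pkg/tree"
	"github.com/caos/zitadel/operator"
)

// callAdapt unpacks the six return values of the widened AdaptFunc.
func callAdapt(
	adapt operator.AdaptFunc,
	monitor mntr.Monitor,
	desired, current *tree.Tree,
) (map[string]*secret.Secret, map[string]*secret.Existing, error) {
	_, _, secrets, existing, migrate, err := adapt(monitor, desired, current)
	if err != nil {
		return nil, nil, err
	}
	_ = migrate // true when a migration step must run before ensuring
	return secrets, existing, nil
}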

operator/api/core/api.go (new file, 46 lines)

@@ -0,0 +1,46 @@
package core
import (
"errors"
"github.com/caos/orbos/pkg/tree"
"gopkg.in/yaml.v3"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func UnmarshalUnstructuredSpec(unstruct *unstructured.Unstructured) (*tree.Tree, error) {
spec, found := unstruct.Object["spec"]
if !found {
return nil, errors.New("no spec in crd")
}
specMap, ok := spec.(map[string]interface{})
if !ok {
return nil, errors.New("spec in crd is not a map")
}
data, err := yaml.Marshal(specMap)
if err != nil {
return nil, err
}
desired := &tree.Tree{}
if err := yaml.Unmarshal(data, &desired); err != nil {
return nil, err
}
return desired, nil
}
func MarshalToUnstructuredSpec(t *tree.Tree) (*unstructured.Unstructured, error) {
data, err := yaml.Marshal(t)
if err != nil {
return nil, err
}
unstruct := &unstructured.Unstructured{
Object: map[string]interface{}{
"spec": make(map[string]interface{}),
},
}
return unstruct, yaml.Unmarshal(data, unstruct.Object["spec"])
}
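
For context, a hypothetical round-trip through the two helpers above (the function is illustrative, not part of this commit): decode the spec of a fetched custom resource into the operator's internal tree, mutate it, and re-encode it for re-application.

package example

import (
	"github.com/caos/zitadel/operator/api/core"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// roundTripSpec decodes a custom resource's spec into a *tree.Tree and
// re-encodes it; callers would mutate the tree in between.
func roundTripSpec(unstruct *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	desired, err := core.UnmarshalUnstructuredSpec(unstruct)
	if err != nil {
		return nil, err
	}
	// ... mutate desired here ...
	return core.MarshalToUnstructuredSpec(desired)
}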


@@ -0,0 +1,44 @@
package database
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/core"
databasev1 "github.com/caos/zitadel/operator/api/database/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
const (
Namespace = "caos-system"
kind = "Database"
apiVersion = "caos.ch/v1"
Name = "database"
)
func ReadCrd(k8sClient kubernetes.ClientInt) (*tree.Tree, error) {
unstruct, err := k8sClient.GetNamespacedCRDResource(databasev1.GroupVersion.Group, databasev1.GroupVersion.Version, kind, Namespace, Name)
if err != nil {
if macherrs.IsNotFound(err) || meta.IsNoMatchError(err) {
return nil, nil
}
return nil, err
}
return core.UnmarshalUnstructuredSpec(unstruct)
}
func WriteCrd(k8sClient kubernetes.ClientInt, t *tree.Tree) error {
unstruct, err := core.MarshalToUnstructuredSpec(t)
if err != nil {
return err
}
unstruct.SetName(Name)
unstruct.SetNamespace(Namespace)
unstruct.SetKind(kind)
unstruct.SetAPIVersion(apiVersion)
return k8sClient.ApplyNamespacedCRDResource(databasev1.GroupVersion.Group, databasev1.GroupVersion.Version, kind, Namespace, Name, unstruct)
}


@@ -0,0 +1,57 @@
// +kubebuilder:object:generate=true
// +groupName=caos.ch
package v1
import (
"github.com/caos/orbos/pkg/tree"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "caos.ch", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:crd=Database
type Database struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec Spec `json:"spec,omitempty"`
Status Status `json:"status,omitempty"`
}
type Status struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
type Spec struct {
Common *tree.Common `json:",inline" yaml:",inline"`
Spec *orbdb.Spec `json:"spec" yaml:"spec"`
Database *Empty `json:"database" yaml:"database"`
}
type Empty struct{}
// +kubebuilder:object:root=true
type DatabaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Database `json:"items"`
}
func init() {
SchemeBuilder.Register(&Database{}, &DatabaseList{})
}


@@ -0,0 +1,146 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/database/kinds/orb"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Database) DeepCopyInto(out *Database) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Database.
func (in *Database) DeepCopy() *Database {
if in == nil {
return nil
}
out := new(Database)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Database) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DatabaseList) DeepCopyInto(out *DatabaseList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Database, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseList.
func (in *DatabaseList) DeepCopy() *DatabaseList {
if in == nil {
return nil
}
out := new(DatabaseList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DatabaseList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Empty) DeepCopyInto(out *Empty) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Empty.
func (in *Empty) DeepCopy() *Empty {
if in == nil {
return nil
}
out := new(Empty)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.Common != nil {
in, out := &in.Common, &out.Common
*out = new(tree.Common)
**out = **in
}
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(orb.Spec)
(*in).DeepCopyInto(*out)
}
if in.Database != nil {
in, out := &in.Database, &out.Database
*out = new(Empty)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
if in == nil {
return nil
}
out := new(Status)
in.DeepCopyInto(out)
return out
}


@@ -0,0 +1,44 @@
package zitadel
import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/core"
zitadelv1 "github.com/caos/zitadel/operator/api/zitadel/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
)
const (
Namespace = "caos-system"
kind = "Zitadel"
apiVersion = "caos.ch/v1"
Name = "zitadel"
)
func ReadCrd(k8sClient kubernetes.ClientInt) (*tree.Tree, error) {
unstruct, err := k8sClient.GetNamespacedCRDResource(zitadelv1.GroupVersion.Group, zitadelv1.GroupVersion.Version, kind, Namespace, Name)
if err != nil {
if macherrs.IsNotFound(err) || meta.IsNoMatchError(err) {
return nil, nil
}
return nil, err
}
return core.UnmarshalUnstructuredSpec(unstruct)
}
func WriteCrd(k8sClient kubernetes.ClientInt, t *tree.Tree) error {
unstruct, err := core.MarshalToUnstructuredSpec(t)
if err != nil {
return err
}
unstruct.SetName(Name)
unstruct.SetNamespace(Namespace)
unstruct.SetKind(kind)
unstruct.SetAPIVersion(apiVersion)
return k8sClient.ApplyNamespacedCRDResource(zitadelv1.GroupVersion.Group, zitadelv1.GroupVersion.Version, kind, Namespace, Name, unstruct)
}
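
Note that both ReadCrd implementations deliberately return (nil, nil) when the CRD is not registered or the resource is absent, so callers must nil-check before reconciling. A hypothetical caller (not from the repository):

package example

import (
	"github.com/caos/orbos/pkg/kubernetes"
	"github.com/caos/zitadel/operator/api/zitadel"
)

// reconcileIfPresent skips reconciliation when no Zitadel resource exists.
func reconcileIfPresent(k8sClient kubernetes.ClientInt) error {
	desired, err := zitadel.ReadCrd(k8sClient)
	if err != nil {
		return err
	}
	if desired == nil {
		return nil // CRD or resource not found: nothing to reconcile
	}
	// ... adapt, query and ensure against desired ...
	return nil
}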


@@ -0,0 +1,57 @@
// +kubebuilder:object:generate=true
// +groupName=caos.ch
package v1
import (
"github.com/caos/orbos/pkg/tree"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "caos.ch", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:crd=Zitadel
type Zitadel struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec Spec `json:"spec,omitempty"`
Status Status `json:"status,omitempty"`
}
type Status struct {
// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
// Important: Run "make" to regenerate code after modifying this file
}
type Spec struct {
Common *tree.Common `json:",inline" yaml:",inline"`
Spec *orbz.Spec `json:"spec" yaml:"spec"`
IAM *Empty `json:"iam" yaml:"iam"`
}
type Empty struct{}
// +kubebuilder:object:root=true
type ZitadelList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Zitadel `json:"items"`
}
func init() {
SchemeBuilder.Register(&Zitadel{}, &ZitadelList{})
}


@@ -0,0 +1,146 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/zitadel/kinds/orb"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Empty) DeepCopyInto(out *Empty) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Empty.
func (in *Empty) DeepCopy() *Empty {
if in == nil {
return nil
}
out := new(Empty)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.Common != nil {
in, out := &in.Common, &out.Common
*out = new(tree.Common)
**out = **in
}
if in.Spec != nil {
in, out := &in.Spec, &out.Spec
*out = new(orb.Spec)
(*in).DeepCopyInto(*out)
}
if in.IAM != nil {
in, out := &in.IAM, &out.IAM
*out = new(Empty)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
if in == nil {
return nil
}
out := new(Status)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Zitadel) DeepCopyInto(out *Zitadel) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Zitadel.
func (in *Zitadel) DeepCopy() *Zitadel {
if in == nil {
return nil
}
out := new(Zitadel)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Zitadel) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ZitadelList) DeepCopyInto(out *ZitadelList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Zitadel, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZitadelList.
func (in *ZitadelList) DeepCopy() *ZitadelList {
if in == nil {
return nil
}
out := new(ZitadelList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ZitadelList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}


@@ -0,0 +1,74 @@
package crtlcrd
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
databasev1 "github.com/caos/zitadel/operator/api/database/v1"
zitadelv1 "github.com/caos/zitadel/operator/api/zitadel/v1"
"github.com/caos/zitadel/operator/crtlcrd/database"
"github.com/caos/zitadel/operator/crtlcrd/zitadel"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
)
const (
Database = "database"
Zitadel = "zitadel"
)
var (
scheme = runtime.NewScheme()
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = databasev1.AddToScheme(scheme)
_ = zitadelv1.AddToScheme(scheme)
}
func Start(monitor mntr.Monitor, version, metricsAddr string, features ...string) error {
cfg := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: false,
LeaderElectionID: "9adsd12l.caos.ch",
})
if err != nil {
return errors.Wrap(err, "unable to start manager")
}
k8sClient := kubernetes.NewK8sClientWithConfig(monitor, cfg)
for _, feature := range features {
switch feature {
case Database:
if err = (&database.Reconciler{
ClientInt: k8sClient,
Monitor: monitor,
Scheme: mgr.GetScheme(),
Version: version,
}).SetupWithManager(mgr); err != nil {
return errors.Wrap(err, "unable to create controller")
}
case Zitadel:
if err = (&zitadel.Reconciler{
ClientInt: k8sClient,
Monitor: monitor,
Scheme: mgr.GetScheme(),
Version: version,
}).SetupWithManager(mgr); err != nil {
return errors.Wrap(err, "unable to create controller")
}
}
}
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
return errors.Wrap(err, "problem running manager")
}
return nil
}
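
A minimal sketch of wiring this entrypoint (hypothetical; in the repository the monitor, version and metrics address come from zitadelctl's start command):

package example

import (
	"github.com/caos/orbos/mntr"
	"github.com/caos/zitadel/operator/crtlcrd"
)

// runCrdMode starts the manager with both controllers enabled.
func runCrdMode(monitor mntr.Monitor, version string) error {
	return crtlcrd.Start(monitor, version, ":8080", crtlcrd.Database, crtlcrd.Zitadel)
}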


@@ -0,0 +1,68 @@
package database
import (
"context"
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/database"
v1 "github.com/caos/zitadel/operator/api/database/v1"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
type Reconciler struct {
kubernetes.ClientInt
Monitor mntr.Monitor
Scheme *runtime.Scheme
Version string
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
internalMonitor := r.Monitor.WithFields(map[string]interface{}{
"kind": "database",
"namespace": req.NamespacedName,
})
defer func() {
r.Monitor.Error(err)
}()
if req.Namespace != database.Namespace || req.Name != database.Name {
return res, fmt.Errorf("resource must be named %s and namespaced in %s", database.Name, database.Namespace)
}
desired, err := database.ReadCrd(r.ClientInt)
if err != nil {
internalMonitor.Error(err)
return res, err
}
query, _, _, _, _, err := orbdb.AdaptFunc("", &r.Version, false, "database")(internalMonitor, desired, &tree.Tree{})
if err != nil {
internalMonitor.Error(err)
return res, err
}
ensure, err := query(r.ClientInt, map[string]interface{}{})
if err != nil {
internalMonitor.Error(err)
return res, err
}
if err := ensure(r.ClientInt); err != nil {
internalMonitor.Error(err)
return res, err
}
return res, nil
}
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1.Database{}).
Complete(r)
}


@@ -0,0 +1,67 @@
package zitadel
import (
"context"
"fmt"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api/zitadel"
v1 "github.com/caos/zitadel/operator/api/zitadel/v1"
orbz "github.com/caos/zitadel/operator/zitadel/kinds/orb"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
type Reconciler struct {
kubernetes.ClientInt
Monitor mntr.Monitor
Scheme *runtime.Scheme
Version string
}
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
internalMonitor := r.Monitor.WithFields(map[string]interface{}{
"kind": "zitadel",
"namespace": req.NamespacedName,
})
defer func() {
r.Monitor.Error(err)
}()
if req.Namespace != zitadel.Namespace || req.Name != zitadel.Name {
return res, fmt.Errorf("resource must be named %s and namespaced in %s", zitadel.Name, zitadel.Namespace)
}
desired, err := zitadel.ReadCrd(r.ClientInt)
if err != nil {
return res, err
}
query, _, _, _, _, err := orbz.AdaptFunc(nil, "ensure", &r.Version, false, []string{"operator", "iam"})(internalMonitor, desired, &tree.Tree{})
if err != nil {
internalMonitor.Error(err)
return res, err
}
ensure, err := query(r.ClientInt, map[string]interface{}{})
if err != nil {
internalMonitor.Error(err)
return res, err
}
if err := ensure(r.ClientInt); err != nil {
internalMonitor.Error(err)
return res, err
}
return res, nil
}
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&v1.Zitadel{}).
Complete(r)
}


@@ -1,11 +1,12 @@
package start
package crtlgitops
import (
"context"
"time"
"github.com/caos/zitadel/operator/database"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"github.com/caos/zitadel/operator/zitadel"
"time"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
@@ -16,7 +17,7 @@ import (
kubernetes2 "github.com/caos/zitadel/pkg/kubernetes"
)
func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string) error {
func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, version *string, gitops bool) error {
takeoffChan := make(chan struct{})
go func() {
takeoffChan <- struct{}{}
@@ -35,7 +36,7 @@ func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.
return err
}
takeoff := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbConfig, "ensure", version, []string{"operator", "iam"}), k8sClient)
takeoff := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbConfig, "ensure", version, gitops, []string{"operator", "iam"}), k8sClient)
go func() {
started := time.Now()
@@ -53,7 +54,15 @@ func Operator(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.
return nil
}
func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb, k8sClient *kubernetes.Client, backup string, version *string) error {
func Restore(
monitor mntr.Monitor,
gitClient *git.Client,
orbCfg *orbconfig.Orb,
k8sClient *kubernetes.Client,
backup string,
gitops bool,
version *string,
) error {
databasesList := []string{
"notification",
"adminapi",
@@ -67,7 +76,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return err
}
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaledown", version, []string{"scaledown"}), k8sClient)(); err != nil {
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaledown", version, gitops, []string{"scaledown"}), k8sClient)(); err != nil {
return err
}
@@ -75,7 +84,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return err
}
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "migration", version, []string{"migration"}), k8sClient)(); err != nil {
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "migration", version, gitops, []string{"migration"}), k8sClient)(); err != nil {
return err
}
@@ -89,7 +98,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return err
}
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaleup", version, []string{"scaleup"}), k8sClient)(); err != nil {
if err := zitadel.Takeoff(monitor, gitClient, orb.AdaptFunc(orbCfg, "scaleup", version, gitops, []string{"scaleup"}), k8sClient)(); err != nil {
return err
}
@@ -100,7 +109,7 @@ func Restore(monitor mntr.Monitor, gitClient *git.Client, orbCfg *orbconfig.Orb,
return nil
}
func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, binaryVersion *string) error {
func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Client, binaryVersion *string, gitops bool) error {
takeoffChan := make(chan struct{})
go func() {
takeoffChan <- struct{}{}
@@ -119,7 +128,7 @@ func Database(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.
return err
}
takeoff := database.Takeoff(monitor, gitClient, orbdb.AdaptFunc("", binaryVersion, "database", "backup"), k8sClient)
takeoff := database.Takeoff(monitor, gitClient, orbdb.AdaptFunc("", binaryVersion, gitops, "operator", "database", "backup"), k8sClient)
go func() {
started := time.Now()
@@ -150,6 +159,6 @@ func Backup(monitor mntr.Monitor, orbConfigPath string, k8sClient *kubernetes.Cl
return err
}
database.Takeoff(monitor, gitClient, orbdb.AdaptFunc(backup, binaryVersion, "instantbackup"), k8sClient)()
database.Takeoff(monitor, gitClient, orbdb.AdaptFunc(backup, binaryVersion, false, "instantbackup"), k8sClient)()
return nil
}


@@ -28,6 +28,8 @@ func GetQueryAndDestroyFuncs(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
switch desiredTree.Common.Kind {
@@ -50,7 +52,7 @@ func GetQueryAndDestroyFuncs(
features,
)(monitor, desiredTree, currentTree)
default:
return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
}
}


@@ -2,6 +2,7 @@ package bucket
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/helper"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/secret"
"github.com/caos/orbos/pkg/labels"
@@ -32,28 +33,36 @@ func AdaptFunc(
version string,
features []string,
) operator.AdaptFunc {
return func(monitor mntr.Monitor, desired *tree.Tree, current *tree.Tree) (queryFunc operator.QueryFunc, destroyFunc operator.DestroyFunc, secrets map[string]*secretpkg.Secret, err error) {
return func(
monitor mntr.Monitor,
desired *tree.Tree,
current *tree.Tree,
) (
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secretpkg.Secret,
map[string]*secretpkg.Existing,
bool,
error,
) {
internalMonitor := monitor.WithField("component", "backup")
desiredKind, err := ParseDesiredV0(desired)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
secrets, existing := getSecretsMap(desiredKind)
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
destroyS, err := secret.AdaptFuncToDestroy(namespace, secretName)
if err != nil {
return nil, nil, nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: desiredKind.Spec.ServiceAccountJSON.Value})
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
_, destroyB, err := backup.AdaptFunc(
@@ -74,7 +83,7 @@ func AdaptFunc(
version,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
_, destroyR, err := restore.AdaptFunc(
@@ -93,7 +102,7 @@ func AdaptFunc(
version,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
_, destroyC, err := clean.AdaptFunc(
@@ -110,7 +119,7 @@ func AdaptFunc(
version,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
destroyers := make([]operator.DestroyFunc, 0)
@@ -133,6 +142,11 @@ func AdaptFunc(
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
if err := desiredKind.validateSecrets(); err != nil {
return nil, err
}
currentDB, err := coreDB.ParseQueriedForDatabase(queried)
if err != nil {
return nil, err
@@ -143,6 +157,16 @@ func AdaptFunc(
databases = []string{}
}
value, err := helper.GetSecretValue(k8sClient, desiredKind.Spec.ServiceAccountJSON, desiredKind.Spec.ExistingServiceAccountJSON)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), map[string]string{secretKey: value})
if err != nil {
return nil, err
}
queryB, _, err := backup.AdaptFunc(
internalMonitor,
name,
@@ -201,30 +225,53 @@ func AdaptFunc(
}
queriers := make([]operator.QueryFunc, 0)
cleanupQueries := make([]operator.QueryFunc, 0)
if databases != nil && len(databases) != 0 {
for _, feature := range features {
switch feature {
case backup.Normal, backup.Instant:
case backup.Normal:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryB,
)
case backup.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryB,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(backup.GetCleanupFunc(monitor, namespace, name)),
)
case clean.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryC,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(clean.GetCleanupFunc(monitor, namespace, name)),
)
case restore.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryS),
queryR,
)
cleanupQueries = append(cleanupQueries,
operator.EnsureFuncToQueryFunc(restore.GetCleanupFunc(monitor, namespace, name)),
)
}
}
}
for _, cleanup := range cleanupQueries {
queriers = append(queriers, cleanup)
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
getSecretsMap(desiredKind),
secrets,
existing,
false,
nil
}
}


@@ -1,6 +1,8 @@
package bucket
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -13,7 +15,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"testing"
)
func TestBucket_Secrets(t *testing.T) {
@@ -60,7 +61,7 @@ func TestBucket_Secrets(t *testing.T) {
"serviceaccountjson": saJson,
}
_, _, secrets, err := AdaptFunc(
_, _, secrets, existing, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@@ -78,6 +79,7 @@ func TestBucket_Secrets(t *testing.T) {
assert.NoError(t, err)
for key, value := range allSecrets {
assert.Contains(t, secrets, key)
assert.Contains(t, existing, key)
assert.Equal(t, value, secrets[key].Value)
}
}
@@ -131,7 +133,7 @@ func TestBucket_AdaptBackup(t *testing.T) {
SetBackup(client, namespace, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@@ -205,7 +207,7 @@ func TestBucket_AdaptInstantBackup(t *testing.T) {
SetInstantBackup(client, namespace, backupName, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@@ -280,7 +282,7 @@ func TestBucket_AdaptRestore(t *testing.T) {
SetRestore(client, namespace, backupName, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,
@@ -316,6 +318,15 @@ func TestBucket_AdaptClean(t *testing.T) {
namespace := "testNs"
componentLabels := labels.MustForComponent(labels.MustForAPI(labels.MustForOperator("testProd", "testOp", "testVersion"), "BucketBackup", "v0"), "testComponent")
k8sLabels := map[string]string{
"app.kubernetes.io/component": "testComponent",
"app.kubernetes.io/managed-by": "testOp",
"app.kubernetes.io/name": "backup-serviceaccountjson",
"app.kubernetes.io/part-of": "testProd",
"app.kubernetes.io/version": "testVersion",
"caos.ch/apiversion": "v0",
"caos.ch/kind": "BucketBackup",
}
timestamp := "test"
nodeselector := map[string]string{"test": "test"}
@@ -344,9 +355,9 @@ func TestBucket_AdaptClean(t *testing.T) {
return nil
}
SetClean(client, namespace, backupName)
SetClean(client, namespace, backupName, k8sLabels, saJson)
query, _, _, err := AdaptFunc(
query, _, _, _, _, err := AdaptFunc(
backupName,
namespace,
componentLabels,


@@ -1,9 +1,10 @@
package backup
import (
"github.com/caos/zitadel/operator"
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/cronjob"
@@ -13,18 +14,18 @@ import (
)
const (
defaultMode int32 = 256
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
backupPath = "/cockroach"
backupNameEnv = "BACKUP_NAME"
cronJobNamePrefix = "backup-"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
timeout time.Duration = 60
Normal = "backup"
Instant = "instantbackup"
defaultMode int32 = 256
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
backupPath = "/cockroach"
backupNameEnv = "BACKUP_NAME"
cronJobNamePrefix = "backup-"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
timeout = 15 * time.Minute
Normal = "backup"
Instant = "instantbackup"
)
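
The old declaration "timeout time.Duration = 60" converts the untyped constant 60 into 60 nanoseconds, since time.Duration counts nanoseconds, which is why job waits timed out almost immediately; the fix spells out the unit. A self-contained illustration (assumes nothing from the repository):

package main

import (
	"fmt"
	"time"
)

func main() {
	const wrong time.Duration = 60 // untyped 60 becomes 60 nanoseconds
	right := 15 * time.Minute      // explicit unit, as in the fix
	fmt.Println(wrong, right)      // prints: 60ns 15m0s
}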
func AdaptFunc(
@@ -119,7 +120,6 @@ func AdaptFunc(
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobDef.Namespace, jobDef.Name)),
)
}
}


@@ -1,6 +1,8 @@
package backup
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -10,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"testing"
)
func TestBackup_AdaptInstantBackup1(t *testing.T) {
@@ -60,8 +61,6 @@ func TestBackup_AdaptInstantBackup1(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
@@ -134,8 +133,6 @@ func TestBackup_AdaptInstantBackup2(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,


@@ -7,15 +7,19 @@ import (
"github.com/pkg/errors"
)
func getCleanupFunc(monitor mntr.Monitor, namespace string, name string) operator.EnsureFunc {
func GetCleanupFunc(
monitor mntr.Monitor,
namespace string,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for backup to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, name, timeout); err != nil {
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for backup to be completed"))
return err
}
monitor.Info("backup is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, name); err != nil {
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
monitor.Error(errors.Wrap(err, "error while trying to cleanup backup"))
return err
}


@@ -1,12 +1,13 @@
package backup
import (
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/golang/mock/gomock"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"testing"
)
func TestBackup_Cleanup1(t *testing.T) {
@@ -15,12 +16,12 @@ func TestBackup_Cleanup1(t *testing.T) {
name := "test"
namespace := "testNs"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
@@ -30,11 +31,11 @@ func TestBackup_Cleanup2(t *testing.T) {
name := "test2"
namespace := "testNs2"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}


@@ -1,9 +1,10 @@
package clean
import (
"github.com/caos/zitadel/operator"
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
@@ -12,16 +13,16 @@ import (
)
const (
Instant = "clean"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
jobPrefix = "backup-"
jobSuffix = "-clean"
timeout time.Duration = 60
Instant = "clean"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
internalSecretName = "client-certs"
image = "ghcr.io/caos/zitadel-crbackup"
rootSecretName = "cockroachdb.client.root"
jobPrefix = "backup-"
jobSuffix = "-clean"
timeout = 60 * time.Second
)
func AdaptFunc(
@@ -71,7 +72,6 @@ func AdaptFunc(
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobDef.Namespace, jobDef.Name)),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {


@@ -1,6 +1,8 @@
package clean
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -10,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"testing"
)
func TestBackup_Adapt1(t *testing.T) {
@@ -49,8 +50,6 @@ func TestBackup_Adapt1(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
@@ -109,8 +108,6 @@ func TestBackup_Adapt2(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,


@@ -7,19 +7,19 @@ import (
"github.com/pkg/errors"
)
func getCleanupFunc(
func GetCleanupFunc(
monitor mntr.Monitor,
namespace string,
jobName string,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for clean to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, jobName, 60); err != nil {
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for clean to be completed"))
return err
}
monitor.Info("clean is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, jobName); err != nil {
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
monitor.Error(errors.Wrap(err, "error while trying to cleanup clean"))
return err
}


@@ -15,12 +15,12 @@ func TestBackup_Cleanup1(t *testing.T) {
name := "test"
namespace := "testNs"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
@@ -30,11 +30,11 @@ func TestBackup_Cleanup2(t *testing.T) {
name := "test2"
namespace := "testNs2"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}


@@ -1,7 +1,9 @@
package bucket
import (
secret2 "github.com/caos/orbos/pkg/secret"
"fmt"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/pkg/errors"
)
@@ -12,14 +14,15 @@ type DesiredV0 struct {
}
type Spec struct {
Verbose bool
Cron string `yaml:"cron,omitempty"`
Bucket string `yaml:"bucket,omitempty"`
ServiceAccountJSON *secret2.Secret `yaml:"serviceAccountJSON,omitempty"`
Verbose bool
Cron string `yaml:"cron,omitempty"`
Bucket string `yaml:"bucket,omitempty"`
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ExistingServiceAccountJSON *secret.Existing `yaml:"existingServiceAccountJSON,omitempty"`
}
func (s *Spec) IsZero() bool {
if (s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) &&
if ((s.ServiceAccountJSON == nil || s.ServiceAccountJSON.IsZero()) && (s.ExistingServiceAccountJSON == nil || s.ExistingServiceAccountJSON.IsZero())) &&
!s.Verbose &&
s.Cron == "" &&
s.Bucket == "" {
@@ -40,3 +43,10 @@ func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
return desiredKind, nil
}
func (d *DesiredV0) validateSecrets() error {
if err := secret.ValidateSecret(d.Spec.ServiceAccountJSON, d.Spec.ExistingServiceAccountJSON); err != nil {
return fmt.Errorf("validating api key failed: %w", err)
}
return nil
}


@@ -65,7 +65,18 @@ func SetClean(
k8sClient *kubernetesmock.MockClientInt,
namespace string,
backupName string,
labels map[string]string,
saJson string,
) {
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
Labels: labels,
},
StringData: map[string]string{secretKey: saJson},
Type: "Opaque",
}).Times(1).Return(nil)
k8sClient.EXPECT().ApplyJob(gomock.Any()).Times(1).Return(nil)
k8sClient.EXPECT().GetJob(namespace, clean.GetJobName(backupName)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, clean.GetJobName(backupName)))


@@ -1,9 +1,10 @@
package restore
import (
"github.com/caos/zitadel/operator"
"time"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/job"
@@ -12,16 +13,16 @@ import (
)
const (
Instant = "restore"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
jobPrefix = "backup-"
jobSuffix = "-restore"
image = "ghcr.io/caos/zitadel-crbackup"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout time.Duration = 60
Instant = "restore"
defaultMode = int32(256)
certPath = "/cockroach/cockroach-certs"
secretPath = "/secrets/sa.json"
jobPrefix = "backup-"
jobSuffix = "-restore"
image = "ghcr.io/caos/zitadel-crbackup"
internalSecretName = "client-certs"
rootSecretName = "cockroachdb.client.root"
timeout = 15 * time.Minute
)
func AdaptFunc(
@@ -79,7 +80,6 @@ func AdaptFunc(
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(checkDBReady),
operator.ResourceQueryToZitadelQuery(queryJ),
operator.EnsureFuncToQueryFunc(getCleanupFunc(monitor, jobdef.Namespace, jobdef.Name)),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {


@@ -1,6 +1,8 @@
package restore
import (
"testing"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
@@ -10,7 +12,6 @@ import (
corev1 "k8s.io/api/core/v1"
macherrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"testing"
)
func TestBackup_Adapt1(t *testing.T) {
@@ -54,8 +55,6 @@ func TestBackup_Adapt1(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,
@@ -121,8 +120,6 @@ func TestBackup_Adapt2(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1).Return(nil)
client.EXPECT().GetJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobName))
client.EXPECT().WaitUntilJobCompleted(jobDef.Namespace, jobDef.Name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(jobDef.Namespace, jobDef.Name).Times(1).Return(nil)
query, _, err := AdaptFunc(
monitor,


@@ -7,15 +7,19 @@ import (
"github.com/pkg/errors"
)
func getCleanupFunc(monitor mntr.Monitor, namespace, jobName string) operator.EnsureFunc {
func GetCleanupFunc(
monitor mntr.Monitor,
namespace,
backupName string,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for restore to be completed")
if err := k8sClient.WaitUntilJobCompleted(namespace, jobName, timeout); err != nil {
if err := k8sClient.WaitUntilJobCompleted(namespace, GetJobName(backupName), timeout); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for restore to be completed"))
return err
}
monitor.Info("restore is completed, cleanup")
if err := k8sClient.DeleteJob(namespace, jobName); err != nil {
if err := k8sClient.DeleteJob(namespace, GetJobName(backupName)); err != nil {
monitor.Error(errors.Wrap(err, "error while trying to cleanup restore"))
return err
}


@@ -15,12 +15,12 @@ func TestBackup_Cleanup1(t *testing.T) {
name := "test"
namespace := "testNs"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}
@@ -30,11 +30,11 @@ func TestBackup_Cleanup2(t *testing.T) {
name := "test2"
namespace := "testNs2"
cleanupFunc := getCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, name).Times(1)
cleanupFunc := GetCleanupFunc(monitor, namespace, name)
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(nil)
client.EXPECT().DeleteJob(namespace, GetJobName(name)).Times(1)
assert.NoError(t, cleanupFunc(client))
client.EXPECT().WaitUntilJobCompleted(namespace, name, timeout).Times(1).Return(errors.New("fail"))
client.EXPECT().WaitUntilJobCompleted(namespace, GetJobName(name), timeout).Times(1).Return(errors.New("fail"))
assert.Error(t, cleanupFunc(client))
}


@@ -4,8 +4,12 @@ import (
"github.com/caos/orbos/pkg/secret"
)
func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
secrets := make(map[string]*secret.Secret, 0)
func getSecretsMap(desiredKind *DesiredV0) (map[string]*secret.Secret, map[string]*secret.Existing) {
var (
secrets = make(map[string]*secret.Secret, 0)
existing = make(map[string]*secret.Existing, 0)
)
if desiredKind.Spec == nil {
desiredKind.Spec = &Spec{}
}
@@ -13,7 +17,14 @@ func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
if desiredKind.Spec.ServiceAccountJSON == nil {
desiredKind.Spec.ServiceAccountJSON = &secret.Secret{}
}
secrets["serviceaccountjson"] = desiredKind.Spec.ServiceAccountJSON
return secrets
if desiredKind.Spec.ExistingServiceAccountJSON == nil {
desiredKind.Spec.ExistingServiceAccountJSON = &secret.Existing{}
}
sakey := "serviceaccountjson"
secrets[sakey] = desiredKind.Spec.ServiceAccountJSON
existing[sakey] = desiredKind.Spec.ExistingServiceAccountJSON
return secrets, existing
}
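getSecretsMap now returns two parallel maps, the inline secrets and the references to pre-existing Kubernetes secrets, keyed identically so lookups stay symmetric. A condensed sketch with local stand-in types (Secret and Existing here are illustrative, not the orbos definitions):

package bucket

type Secret struct{ Value string }
type Existing struct{ Name, Key string }

type spec struct {
	ServiceAccountJSON         *Secret
	ExistingServiceAccountJSON *Existing
}

// getSecretsMap defaults nil fields so callers always receive non-nil
// entries, then registers both variants under the same key.
func getSecretsMap(s *spec) (map[string]*Secret, map[string]*Existing) {
	if s.ServiceAccountJSON == nil {
		s.ServiceAccountJSON = &Secret{}
	}
	if s.ExistingServiceAccountJSON == nil {
		s.ExistingServiceAccountJSON = &Existing{}
	}
	const key = "serviceaccountjson"
	return map[string]*Secret{key: s.ServiceAccountJSON},
		map[string]*Existing{key: s.ExistingServiceAccountJSON}
}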


@@ -1,22 +1,26 @@
package bucket
import (
"testing"
"github.com/caos/orbos/pkg/secret"
"github.com/stretchr/testify/assert"
"testing"
)
func TestBucket_getSecretsFull(t *testing.T) {
secrets := getSecretsMap(&desired)
secrets, existing := getSecretsMap(&desired)
assert.Equal(t, desired.Spec.ServiceAccountJSON, secrets["serviceaccountjson"])
assert.Equal(t, desired.Spec.ExistingServiceAccountJSON, existing["serviceaccountjson"])
}
func TestBucket_getSecretsEmpty(t *testing.T) {
secrets := getSecretsMap(&desiredWithoutSecret)
secrets, existing := getSecretsMap(&desiredWithoutSecret)
assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
}
func TestBucket_getSecretsNil(t *testing.T) {
secrets := getSecretsMap(&desiredNil)
secrets, existing := getSecretsMap(&desiredNil)
assert.Equal(t, &secret.Secret{}, secrets["serviceaccountjson"])
assert.Equal(t, &secret.Existing{}, existing["serviceaccountjson"])
}


@@ -35,6 +35,8 @@ func GetQueryAndDestroyFuncs(
query operator.QueryFunc,
destroy operator.DestroyFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
componentLabels := labels.MustForComponent(apiLabels, component)
@@ -46,7 +48,7 @@ func GetQueryAndDestroyFuncs(
case "databases.caos.ch/ProvidedDatabase":
return provided.AdaptFunc()(internalMonitor, desiredTree, currentTree)
default:
return nil, nil, nil, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
return nil, nil, nil, nil, false, errors.Errorf("unknown database kind %s", desiredTree.Common.Kind)
}
}


@@ -1,10 +1,11 @@
package managed
import (
"github.com/caos/zitadel/operator"
"strconv"
"strings"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
@@ -51,6 +52,8 @@ func AdaptFunc(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
@@ -62,14 +65,21 @@ func AdaptFunc(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
internalMonitor := monitor.WithField("kind", "cockroachdb")
allSecrets := map[string]*secret.Secret{}
var (
internalMonitor = monitor.WithField("kind", "cockroachdb")
allSecrets = make(map[string]*secret.Secret)
allExisting = make(map[string]*secret.Existing)
migrate bool
)
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
@@ -92,15 +102,15 @@ func AdaptFunc(
queryCert, destroyCert, addUser, deleteUser, listUsers, err := certificate.AdaptFunc(internalMonitor, namespace, componentLabels, desiredKind.Spec.ClusterDns, isFeatureDatabase)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
addRoot, err := addUser("root")
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
destroyRoot, err := deleteUser("root")
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
queryRBAC, destroyRBAC, err := rbac.AdaptFunc(internalMonitor, namespace, labels.MustForName(componentLabels, serviceAccountName))
@@ -126,7 +136,7 @@ func AdaptFunc(
desiredKind.Spec.Resources,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
queryS, destroyS, err := services.AdaptFunc(
@@ -147,12 +157,12 @@ func AdaptFunc(
queryPDB, err := pdb.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, pdbName), cockroachSelector, "1")
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
destroyPDB, err := pdb.AdaptFuncToDestroy(namespace, pdbName)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
currentDB := &Current{
@@ -203,7 +213,7 @@ func AdaptFunc(
for backupName, desiredBackup := range desiredKind.Spec.Backups {
currentBackup := &tree.Tree{}
if timestamp == "" || !oneBackup || (timestamp != "" && strings.HasPrefix(timestamp, backupName)) {
queryB, destroyB, secrets, err := backups.GetQueryAndDestroyFuncs(
queryB, destroyB, secrets, existing, migrateB, err := backups.GetQueryAndDestroyFuncs(
internalMonitor,
desiredBackup,
currentBackup,
@@ -218,10 +228,12 @@ func AdaptFunc(
features,
)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, false, err
}
secret.AppendSecrets(backupName, allSecrets, secrets)
migrate = migrate || migrateB
secret.AppendSecrets(backupName, allSecrets, secrets, allExisting, existing)
destroyers = append(destroyers, destroyB)
queriers = append(queriers, queryB)
}
@@ -251,6 +263,8 @@ func AdaptFunc(
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
allSecrets,
allExisting,
migrate,
nil
}
}
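The managed-database adapter's return list grows from three values to five: besides the query and destroy functions and the inline secrets, it now also reports existing-secret references and a migrate flag, OR-ed together over all configured backups. A reduced sketch of that aggregation, with placeholder types standing in for the operator's:

package managed

type queryFunc func() error
type destroyFunc func() error

type backupResult struct {
	query   queryFunc
	destroy destroyFunc
	migrate bool
}

// adapt collects per-backup results; migrate turns true as soon as any
// single backup kind requests one, matching migrate = migrate || migrateB.
func adapt(backups []backupResult) (queryFunc, destroyFunc, bool) {
	var (
		migrate    bool
		queriers   []queryFunc
		destroyers []destroyFunc
	)
	for _, b := range backups {
		migrate = migrate || b.migrate
		queriers = append(queriers, b.query)
		destroyers = append(destroyers, b.destroy)
	}
	query := func() error {
		for _, q := range queriers {
			if err := q(); err != nil {
				return err
			}
		}
		return nil
	}
	destroy := func() error {
		for _, d := range destroyers {
			if err := d(); err != nil {
				return err
			}
		}
		return nil
	}
	return query, destroy, migrate
}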


@@ -1,6 +1,9 @@
package managed
import (
"testing"
"time"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
@@ -13,8 +16,6 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"testing"
"time"
)
func getTreeWithDBAndBackup(t *testing.T, masterkey string, saJson string, backupName string) *tree.Tree {
@@ -81,9 +82,9 @@ func TestManaged_AdaptBucketBackup(t *testing.T) {
features := []string{backup.Normal}
bucket.SetBackup(k8sClient, namespace, labels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
@@ -119,11 +120,11 @@ func TestManaged_AdaptBucketInstantBackup(t *testing.T) {
features := []string{backup.Instant}
bucket.SetInstantBackup(k8sClient, namespace, backupName, labels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60))
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second)
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}
@@ -159,12 +160,12 @@ func TestManaged_AdaptBucketCleanAndRestore(t *testing.T) {
features := []string{restore.Instant, clean.Instant}
bucket.SetRestore(k8sClient, namespace, backupName, labels, saJson)
bucket.SetClean(k8sClient, namespace, backupName)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).Times(2)
bucket.SetClean(k8sClient, namespace, backupName, labels, saJson)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).Times(2)
desired := getTreeWithDBAndBackup(t, masterkey, saJson, backupName)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
databases := []string{"test1", "test2"}


@@ -1,10 +1,11 @@
package managed
import (
"gopkg.in/yaml.v3"
"testing"
"time"
"gopkg.in/yaml.v3"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/orbos/pkg/labels"
@@ -114,11 +115,11 @@ func TestManaged_Adapt1(t *testing.T) {
//statefulset
k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
//running for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
//not ready for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
//ready after setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
//client
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@@ -132,7 +133,7 @@ func TestManaged_Adapt1(t *testing.T) {
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)
@@ -226,11 +227,11 @@ func TestManaged_Adapt2(t *testing.T) {
//statefulset
k8sClient.EXPECT().ApplyStatefulSet(gomock.Any(), gomock.Any()).MinTimes(1).MaxTimes(1)
//running for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, false, 60*time.Second).MinTimes(1).MaxTimes(1)
//not ready for setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(1)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 1*time.Second).MinTimes(1).MaxTimes(1)
//ready after setup
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, time.Duration(60)).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().WaitUntilStatefulsetIsReady(namespace, SfsName, true, true, 60*time.Second).MinTimes(1).MaxTimes(1)
//client
k8sClient.EXPECT().ListSecrets(namespace, nodeLabels).MinTimes(1).MaxTimes(1).Return(secretList, nil)
dbCurrent.EXPECT().GetCertificate().MinTimes(1).MaxTimes(1).Return(nil)
@@ -244,7 +245,7 @@ func TestManaged_Adapt2(t *testing.T) {
dbCurrent.EXPECT().SetCertificateKey(gomock.Any()).MinTimes(1).MaxTimes(1)
k8sClient.EXPECT().ApplySecret(gomock.Any()).MinTimes(1).MaxTimes(1)
query, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
query, _, _, _, _, err := AdaptFunc(componentLabels, namespace, timestamp, nodeselector, tolerations, version, features)(monitor, desired, &tree.Tree{})
assert.NoError(t, err)
ensure, err := query(k8sClient, queried)


@@ -2,12 +2,14 @@ package statefulset
import (
"fmt"
"sort"
"strings"
"time"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/helpers"
"k8s.io/apimachinery/pkg/util/intstr"
"sort"
"strings"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
@@ -216,7 +218,7 @@ func AdaptFunc(
wrapedQuery, wrapedDestroy, err := resources.WrapFuncs(internalMonitor, query, destroy)
checkDBRunning := func(k8sClient kubernetes.ClientInt) error {
internalMonitor.Info("waiting for statefulset to be running")
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60); err != nil {
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, false, 60*time.Second); err != nil {
internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be running"))
return err
}
@@ -226,7 +228,7 @@ func AdaptFunc(
checkDBNotReady := func(k8sClient kubernetes.ClientInt) error {
internalMonitor.Info("checking for statefulset to not be ready")
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1); err != nil {
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 1*time.Second); err != nil {
internalMonitor.Info("statefulset is not ready")
return nil
}
@@ -253,7 +255,7 @@ func AdaptFunc(
checkDBReady := func(k8sClient kubernetes.ClientInt) error {
internalMonitor.Info("waiting for statefulset to be ready")
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60); err != nil {
if err := k8sClient.WaitUntilStatefulsetIsReady(namespace, name, true, true, 60*time.Second); err != nil {
internalMonitor.Error(errors.Wrap(err, "error while waiting for statefulset to be ready"))
return err
}
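The timeout changes in these hunks fix a genuine unit bug: time.Duration counts nanoseconds, so the bare conversion time.Duration(60) waits 60 nanoseconds rather than 60 seconds, while 60*time.Second is the intended value. A two-line illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(time.Duration(60)) // 60ns, effectively no wait at all
	fmt.Println(60 * time.Second)  // 1m0s, the intended timeout
}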


@@ -17,6 +17,8 @@ func AdaptFunc() func(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
return func(
@@ -27,11 +29,13 @@ func AdaptFunc() func(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
@@ -53,7 +57,9 @@ func AdaptFunc() func(
}, func(k8sClient kubernetes.ClientInt) error {
return nil
},
map[string]*secret.Secret{},
make(map[string]*secret.Secret),
make(map[string]*secret.Existing),
false,
nil
}
}


@@ -9,6 +9,9 @@ import (
"github.com/caos/orbos/pkg/tree"
"github.com/caos/orbos/pkg/treelabels"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/backup"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/clean"
"github.com/caos/zitadel/operator/database/kinds/backups/bucket/restore"
"github.com/caos/zitadel/operator/database/kinds/databases"
"github.com/pkg/errors"
)
@@ -21,18 +24,29 @@ func OperatorSelector() *labels.Selector {
return labels.OpenOperatorSelector("ZITADEL", "database.caos.ch")
}
func AdaptFunc(timestamp string, binaryVersion *string, features ...string) operator.AdaptFunc {
func AdaptFunc(timestamp string, binaryVersion *string, gitops bool, features ...string) operator.AdaptFunc {
return func(monitor mntr.Monitor, orbDesiredTree *tree.Tree, currentTree *tree.Tree) (queryFunc operator.QueryFunc, destroyFunc operator.DestroyFunc, secrets map[string]*secret.Secret, err error) {
return func(
monitor mntr.Monitor,
orbDesiredTree *tree.Tree,
currentTree *tree.Tree,
) (
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
defer func() {
err = errors.Wrapf(err, "building %s failed", orbDesiredTree.Common.Kind)
}()
orbMonitor := monitor.WithField("kind", "orb")
desiredKind, err := parseDesiredV0(orbDesiredTree)
desiredKind, err := ParseDesiredV0(orbDesiredTree)
if err != nil {
return nil, nil, nil, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, migrate, errors.Wrap(err, "parsing desired state failed")
}
orbDesiredTree.Parsed = desiredKind
currentTree = &tree.Tree{}
@@ -43,18 +57,18 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) oper
queryNS, err := namespace.AdaptFuncToEnsure(NamespaceStr)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, nil, migrate, err
}
destroyNS, err := namespace.AdaptFuncToDestroy(NamespaceStr)
/*destroyNS, err := namespace.AdaptFuncToDestroy(NamespaceStr)
if err != nil {
return nil, nil, nil, err
}
}*/
databaseCurrent := &tree.Tree{}
operatorLabels := mustDatabaseOperator(binaryVersion)
queryDB, destroyDB, secrets, err := databases.GetQueryAndDestroyFuncs(
queryDB, destroyDB, secrets, existing, migrate, err := databases.GetQueryAndDestroyFuncs(
orbMonitor,
desiredKind.Database,
databaseCurrent,
@@ -66,23 +80,28 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) oper
desiredKind.Spec.Version,
features,
)
if err != nil {
return nil, nil, nil, err
}
queriers := []operator.QueryFunc{
operator.ResourceQueryToZitadelQuery(queryNS),
queryDB,
}
if desiredKind.Spec.SelfReconciling {
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(Reconcile(monitor, orbDesiredTree)),
)
return nil, nil, nil, nil, migrate, err
}
destroyers := []operator.DestroyFunc{
operator.ResourceDestroyToZitadelDestroy(destroyNS),
destroyDB,
destroyers := make([]operator.DestroyFunc, 0)
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "database", backup.Instant, backup.Normal, restore.Instant, clean.Instant:
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
queryDB,
)
destroyers = append(destroyers,
destroyDB,
)
case "operator":
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
operator.EnsureFuncToQueryFunc(Reconcile(monitor, desiredKind.Spec)),
)
}
}
currentTree.Parsed = &DesiredV0{
@@ -105,6 +124,8 @@ func AdaptFunc(timestamp string, binaryVersion *string, features ...string) oper
return operator.DestroyersToDestroyFunc(monitor, destroyers)(k8sClient)
},
secrets,
existing,
migrate,
nil
}
}
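The orb adapter no longer wires the namespace and database queriers unconditionally; it assembles them per requested feature, and the new "operator" feature makes the deployment reconcile itself. A compact sketch of the dispatch loop (the feature strings are shortened stand-ins for the backup/restore/clean constants):

package orb

type queryFunc func() error

// buildQueriers mirrors the feature switch above: database-style
// features get the namespace and database queriers, while "operator"
// pairs the namespace querier with the self-reconcile step.
func buildQueriers(features []string, queryNS, queryDB, reconcile queryFunc) []queryFunc {
	queriers := make([]queryFunc, 0, 2*len(features))
	for _, feature := range features {
		switch feature {
		case "database", "backup", "instantbackup", "restore", "clean":
			queriers = append(queriers, queryNS, queryDB)
		case "operator":
			queriers = append(queriers, queryNS, reconcile)
		}
	}
	return queriers
}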


@@ -9,7 +9,7 @@ import (
func BackupListFunc() func(monitor mntr.Monitor, desiredTree *tree.Tree) (strings []string, err error) {
return func(monitor mntr.Monitor, desiredTree *tree.Tree) (strings []string, err error) {
desiredKind, err := parseDesiredV0(desiredTree)
desiredKind, err := ParseDesiredV0(desiredTree)
if err != nil {
return nil, errors.Wrap(err, "parsing desired state failed")
}


@@ -7,21 +7,25 @@ import (
)
type DesiredV0 struct {
Common *tree.Common `yaml:",inline"`
Spec struct {
Verbose bool
NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `yaml:"tolerations,omitempty"`
Version string `yaml:"version,omitempty"`
SelfReconciling bool `yaml:"selfReconciling"`
//Use this registry to pull the ZITADEL operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
Common *tree.Common `json:",inline" yaml:",inline"`
Spec *Spec `json:"spec" yaml:"spec"`
Database *tree.Tree
}
func parseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
// +kubebuilder:object:generate=true
type Spec struct {
Verbose bool `json:"verbose" yaml:"verbose"`
NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
SelfReconciling bool `json:"selfReconciling" yaml:"selfReconciling"`
GitOps bool `json:"gitOps,omitempty" yaml:"gitOps,omitempty"`
//Use this registry to pull the Database operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
desiredKind := &DesiredV0{Common: desiredTree.Common}
if err := desiredTree.Original.Decode(desiredKind); err != nil {


@@ -10,38 +10,38 @@ import (
"github.com/pkg/errors"
)
func Reconcile(monitor mntr.Monitor, desiredTree *tree.Tree) operator.EnsureFunc {
func Reconcile(
monitor mntr.Monitor,
spec *Spec,
) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) (err error) {
defer func() {
err = errors.Wrapf(err, "building %s failed", desiredTree.Common.Kind)
}()
recMonitor := monitor.WithField("version", spec.Version)
desiredKind, err := parseDesiredV0(desiredTree)
if err != nil {
return errors.Wrap(err, "parsing desired state failed")
}
desiredTree.Parsed = desiredKind
recMonitor := monitor.WithField("version", desiredKind.Spec.Version)
if desiredKind.Spec.Version == "" {
err := errors.New("No version set in database.yml")
monitor.Error(err)
if spec.Version == "" {
err := errors.New("No version provided for self-reconciling")
recMonitor.Error(err)
return err
}
imageRegistry := desiredKind.Spec.CustomImageRegistry
imageRegistry := spec.CustomImageRegistry
if imageRegistry == "" {
imageRegistry = "ghcr.io"
}
if err := zitadelKubernetes.EnsureDatabaseArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustDatabaseOperator(&desiredKind.Spec.Version)), k8sClient, desiredKind.Spec.Version, desiredKind.Spec.NodeSelector, desiredKind.Spec.Tolerations, imageRegistry); err != nil {
recMonitor.Error(errors.Wrap(err, "Failed to deploy database-operator into k8s-cluster"))
return err
if spec.SelfReconciling {
desiredTree := &tree.Tree{
Common: &tree.Common{
Kind: "databases.caos.ch/Orb",
Version: "v0",
},
}
if err := zitadelKubernetes.EnsureDatabaseArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustDatabaseOperator(&spec.Version)), k8sClient, spec.Version, spec.NodeSelector, spec.Tolerations, imageRegistry, spec.GitOps); err != nil {
recMonitor.Error(errors.Wrap(err, "Failed to deploy database-operator into k8s-cluster"))
return err
}
recMonitor.Info("Applied database-operator")
}
recMonitor.Info("Applied database-operator")
return nil
}
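Reconcile now takes the parsed Spec directly and only deploys the operator when SelfReconciling is set; the version check still runs first, so a missing version fails fast either way. A trimmed sketch of the control flow, with ensure standing in for EnsureDatabaseArtifacts:

package orb

import "errors"

type spec struct {
	Version             string
	SelfReconciling     bool
	CustomImageRegistry string
}

// reconcile validates the version, defaults the registry, and skips
// deployment entirely unless self-reconciling is enabled.
func reconcile(s *spec, ensure func(version, registry string) error) error {
	if s.Version == "" {
		return errors.New("no version provided for self-reconciling")
	}
	registry := s.CustomImageRegistry
	if registry == "" {
		registry = "ghcr.io" // default taken from the diff above
	}
	if !s.SelfReconciling {
		return nil
	}
	return ensure(s.Version, registry)
}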


@@ -0,0 +1,54 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package orb
import (
"k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}


@@ -2,6 +2,7 @@ package database
import (
"errors"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/kubernetes"
@@ -25,7 +26,7 @@ func Takeoff(monitor mntr.Monitor, gitClient *git.Client, adapt operator.AdaptFu
return
}
query, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
query, _, _, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
if err != nil {
internalMonitor.Error(err)
return


@@ -2,16 +2,22 @@ package secrets
import (
"errors"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
"fmt"
"strings"
"github.com/caos/orbos/pkg/kubernetes"
crddb "github.com/caos/zitadel/operator/api/database"
crdzit "github.com/caos/zitadel/operator/api/zitadel"
orbdb "github.com/caos/zitadel/operator/database/kinds/orb"
orbzit "github.com/caos/zitadel/operator/zitadel/kinds/orb"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/git"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator/api"
zitadelOrb "github.com/caos/zitadel/operator/zitadel/kinds/orb"
)
const (
@@ -19,79 +25,154 @@ const (
database string = "database"
)
func GetAllSecretsFunc(orb *orb.Orb) func(monitor mntr.Monitor, gitClient *git.Client) (map[string]*secret.Secret, map[string]*tree.Tree, error) {
return func(monitor mntr.Monitor, gitClient *git.Client) (map[string]*secret.Secret, map[string]*tree.Tree, error) {
allSecrets := make(map[string]*secret.Secret, 0)
allTrees := make(map[string]*tree.Tree, 0)
foundZitadel, err := api.ExistsZitadelYml(gitClient)
if err != nil {
return nil, nil, err
}
if foundZitadel {
zitadelYML, err := api.ReadZitadelYml(gitClient)
if err != nil {
return nil, nil, err
}
allTrees[zitadel] = zitadelYML
_, _, zitadelSecrets, err := zitadelOrb.AdaptFunc(orb, "secret", nil, []string{})(monitor, zitadelYML, &tree.Tree{})
if err != nil {
return nil, nil, err
}
if zitadelSecrets != nil && len(zitadelSecrets) > 0 {
secret.AppendSecrets(zitadel, allSecrets, zitadelSecrets)
}
} else {
monitor.Info("no file for zitadel found")
}
foundDB, err := api.ExistsDatabaseYml(gitClient)
if err != nil {
return nil, nil, err
}
if foundDB {
dbYML, err := api.ReadDatabaseYml(gitClient)
if err != nil {
return nil, nil, err
}
allTrees[database] = dbYML
_, _, dbSecrets, err := orbdb.AdaptFunc("", nil, "database", "backup")(monitor, dbYML, nil)
if err != nil {
return nil, nil, err
}
if dbSecrets != nil && len(dbSecrets) > 0 {
secret.AppendSecrets(database, allSecrets, dbSecrets)
}
} else {
monitor.Info("no file for database found")
}
return allSecrets, allTrees, nil
func GetAllSecretsFunc(
monitor mntr.Monitor,
printLogs,
gitops bool,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
orb *orb.Orb,
) func() (
map[string]*secret.Secret,
map[string]*secret.Existing,
map[string]*tree.Tree,
error,
) {
return func() (
map[string]*secret.Secret,
map[string]*secret.Existing,
map[string]*tree.Tree,
error,
) {
return getAllSecrets(monitor, printLogs, gitops, orb, gitClient, k8sClient)
}
}
func PushFunc() func(monitor mntr.Monitor, gitClient *git.Client, trees map[string]*tree.Tree, path string) error {
return func(monitor mntr.Monitor, gitClient *git.Client, trees map[string]*tree.Tree, path string) error {
operator := ""
if strings.HasPrefix(path, zitadel) {
operator = zitadel
} else if strings.HasPrefix(path, database) {
operator = database
} else {
return errors.New("Operator unknown")
}
func getAllSecrets(
monitor mntr.Monitor,
printLogs,
gitops bool,
orb *orb.Orb,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
) (
map[string]*secret.Secret,
map[string]*secret.Existing,
map[string]*tree.Tree,
error,
) {
allSecrets := make(map[string]*secret.Secret, 0)
allExisting := make(map[string]*secret.Existing, 0)
allTrees := make(map[string]*tree.Tree, 0)
desired, found := trees[operator]
if !found {
return errors.New("Operator file not found")
}
if err := secret.GetOperatorSecrets(
monitor,
printLogs,
gitops,
allTrees,
allSecrets,
allExisting,
zitadel,
func() (bool, error) { return api.ExistsZitadelYml(gitClient) },
func() (t *tree.Tree, err error) { return api.ReadZitadelYml(gitClient) },
func() (t *tree.Tree, err error) { return crdzit.ReadCrd(k8sClient) },
func(t *tree.Tree) (map[string]*secret.Secret, map[string]*secret.Existing, bool, error) {
_, _, secrets, existing, migrate, err := orbzit.AdaptFunc(orb, "secret", nil, gitops, []string{})(monitor, t, &tree.Tree{})
return secrets, existing, migrate, err
},
); err != nil {
return nil, nil, nil, err
}
if operator == zitadel {
if err := secret.GetOperatorSecrets(
monitor,
printLogs,
gitops,
allTrees,
allSecrets,
allExisting,
database,
func() (bool, error) { return api.ExistsDatabaseYml(gitClient) },
func() (t *tree.Tree, err error) { return api.ReadDatabaseYml(gitClient) },
func() (t *tree.Tree, err error) { return crddb.ReadCrd(k8sClient) },
func(t *tree.Tree) (map[string]*secret.Secret, map[string]*secret.Existing, bool, error) {
_, _, secrets, existing, migrate, err := orbdb.AdaptFunc("", nil, gitops, "database", "backup")(monitor, t, nil)
return secrets, existing, migrate, err
},
); err != nil {
return nil, nil, nil, err
}
if k8sClient == nil {
allExisting = nil
}
if len(allSecrets) == 0 && len(allExisting) == 0 {
return nil, nil, nil, errors.New("couldn't find any secrets")
}
return allSecrets, allExisting, allTrees, nil
}
func PushFunc(
monitor mntr.Monitor,
gitops bool,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
) func(
trees map[string]*tree.Tree,
path string,
) error {
return func(
trees map[string]*tree.Tree,
path string,
) error {
return push(monitor, gitops, gitClient, k8sClient, trees, path)
}
}
func push(
monitor mntr.Monitor,
gitops bool,
gitClient *git.Client,
k8sClient kubernetes.ClientInt,
trees map[string]*tree.Tree,
path string,
) error {
var (
pushGitFunc func(*tree.Tree) error
applyCRDFunc func(*tree.Tree) error
operator string
)
if strings.HasPrefix(path, zitadel) {
operator = zitadel
pushGitFunc = func(desired *tree.Tree) error {
return api.PushZitadelDesiredFunc(gitClient, desired)(monitor)
} else if operator == database {
}
applyCRDFunc = func(t *tree.Tree) error {
return crdzit.WriteCrd(k8sClient, t)
}
} else if strings.HasPrefix(path, database) {
operator = database
pushGitFunc = func(desired *tree.Tree) error {
return api.PushDatabaseDesiredFunc(gitClient, desired)(monitor)
}
return errors.New("Operator push function unknown")
applyCRDFunc = func(t *tree.Tree) error {
return crddb.WriteCrd(k8sClient, t)
}
} else {
return errors.New("operator unknown")
}
desired, found := trees[operator]
if !found {
return fmt.Errorf("desired state for %s not found", operator)
}
if gitops {
return pushGitFunc(desired)
}
return applyCRDFunc(desired)
}
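push resolves its write strategy up front: in gitops mode the desired tree is pushed back into the git repository, otherwise it is written as a CRD through the Kubernetes client. A minimal sketch of the two-way dispatch, with the function parameters standing in for PushZitadelDesiredFunc/PushDatabaseDesiredFunc and WriteCrd:

package secrets

import (
	"errors"
	"strings"
)

type tree struct{}

// push picks the operator by path prefix, then the write path by mode.
func push(gitops bool, path string, desired *tree, pushGit, applyCRD func(*tree) error) error {
	if !strings.HasPrefix(path, "zitadel") && !strings.HasPrefix(path, "database") {
		return errors.New("operator unknown")
	}
	if gitops {
		return pushGit(desired)
	}
	return applyCRD(desired)
}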


@@ -3,10 +3,10 @@ package iam
import (
"fmt"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
@@ -22,7 +22,8 @@ func GetQueryAndDestroyFuncs(
currentTree *tree.Tree,
nodeselector map[string]string,
tolerations []core.Toleration,
orbconfig *orb.Orb,
dbClient database.Client,
namespace string,
action string,
version *string,
features []string,
@@ -30,6 +31,8 @@ func GetQueryAndDestroyFuncs(
query operator.QueryFunc,
destroy operator.DestroyFunc,
secrets map[string]*secret.Secret,
existing map[string]*secret.Existing,
migrate bool,
err error,
) {
@@ -42,8 +45,8 @@ func GetQueryAndDestroyFuncs(
switch desiredTree.Common.Kind {
case "zitadel.caos.ch/ZITADEL":
apiLabels := labels.MustForAPI(operatorLabels, "ZITADEL", desiredTree.Common.Version)
return zitadel.AdaptFunc(apiLabels, nodeselector, tolerations, orbconfig, action, version, features)(monitor, desiredTree, currentTree)
return zitadel.AdaptFunc(apiLabels, nodeselector, tolerations, dbClient, namespace, action, version, features)(monitor, desiredTree, currentTree)
default:
return nil, nil, nil, errors.Errorf("unknown iam kind %s", desiredTree.Common.Kind)
return nil, nil, nil, nil, false, errors.Errorf("unknown iam kind %s", desiredTree.Common.Kind)
}
}


@@ -4,7 +4,6 @@ import (
"strconv"
"github.com/caos/orbos/pkg/labels"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/setup"
@@ -13,7 +12,6 @@ import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/namespace"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/ambassador"
@@ -28,7 +26,8 @@ func AdaptFunc(
apiLabels *labels.API,
nodeselector map[string]string,
tolerations []core.Toleration,
orbconfig *orb.Orb,
dbClient database.Client,
namespace string,
action string,
version *string,
features []string,
@@ -41,24 +40,25 @@ func AdaptFunc(
operator.QueryFunc,
operator.DestroyFunc,
map[string]*secret.Secret,
map[string]*secret.Existing,
bool,
error,
) {
allSecrets := make(map[string]*secret.Secret)
internalMonitor := monitor.WithField("kind", "iam")
desiredKind, err := parseDesiredV0(desired)
if err != nil {
return nil, nil, allSecrets, errors.Wrap(err, "parsing desired state failed")
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desired.Parsed = desiredKind
secret.AppendSecrets("", allSecrets, getSecretsMap(desiredKind))
allSecrets, allExisting := getSecretsMap(desiredKind)
if !monitor.IsVerbose() && desiredKind.Spec.Verbose {
internalMonitor.Verbose()
}
namespaceStr := "caos-zitadel"
// shared elements
cmName := "zitadel-vars"
secretName := "zitadel-secret"
@@ -70,28 +70,12 @@ func AdaptFunc(
secretPath := "/secret"
//services which are kubernetes resources and are used in the ambassador elements
grpcServiceName := "grpc-v1"
var grpcPort uint16 = 80
grpcPort := 80
httpServiceName := "http-v1"
var httpPort uint16 = 80
httpPort := 80
uiServiceName := "ui-v1"
var uiPort uint16 = 80
// labels := getLabels()
users := getAllUsers(desiredKind)
allZitadelUsers := getZitadelUserList()
dbClient, err := database.NewClient(monitor, orbconfig.URL, orbconfig.Repokey)
if err != nil {
return nil, nil, allSecrets, err
}
queryNS, err := namespace.AdaptFuncToEnsure(namespaceStr)
if err != nil {
return nil, nil, allSecrets, err
}
destroyNS, err := namespace.AdaptFuncToDestroy(namespaceStr)
if err != nil {
return nil, nil, allSecrets, err
}
uiPort := 80
usersWithoutPWs := getUserListWithoutPasswords(desiredKind)
zitadelComponent := labels.MustForComponent(apiLabels, "ZITADEL")
zitadelDeploymentName := labels.MustForName(zitadelComponent, "zitadel")
@@ -100,21 +84,21 @@ func AdaptFunc(
internalMonitor,
zitadelComponent,
zitadelPodSelector,
namespaceStr,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryC, destroyC, getConfigurationHashes, err := configuration.AdaptFunc(
getQueryC, destroyC, getConfigurationHashes, err := configuration.AdaptFunc(
internalMonitor,
zitadelComponent,
namespaceStr,
namespace,
desiredKind.Spec.Configuration,
cmName,
certPath,
@@ -123,12 +107,11 @@ func AdaptFunc(
consoleCMName,
secretVarsName,
secretPasswordName,
users,
services.GetClientIDFunc(namespaceStr, httpServiceName, httpPort),
dbClient,
services.GetClientIDFunc(namespace, httpServiceName, httpPort),
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryDB, err := database.AdaptFunc(
@@ -136,28 +119,28 @@ func AdaptFunc(
dbClient,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryM, destroyM, err := migration.AdaptFunc(
internalMonitor,
labels.MustForComponent(apiLabels, "database"),
namespaceStr,
namespace,
action,
secretPasswordName,
migrationUser,
allZitadelUsers,
usersWithoutPWs,
nodeselector,
tolerations,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
querySetup, destroySetup, err := setup.AdaptFunc(
getQuerySetup, destroySetup, err := setup.AdaptFunc(
internalMonitor,
zitadelComponent,
namespaceStr,
namespace,
action,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
@@ -170,11 +153,10 @@ func AdaptFunc(
consoleCMName,
secretVarsName,
secretPasswordName,
allZitadelUsers,
migration.GetDoneFunc(monitor, namespaceStr, action),
configuration.GetReadyFunc(monitor, namespaceStr, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName),
getConfigurationHashes,
)
if err != nil {
return nil, nil, nil, nil, false, err
}
queryD, destroyD, err := deployment.AdaptFunc(
internalMonitor,
@@ -182,7 +164,7 @@ func AdaptFunc(
zitadelPodSelector,
desiredKind.Spec.Force,
version,
namespaceStr,
namespace,
desiredKind.Spec.ReplicaCount,
desiredKind.Spec.Affinity,
cmName,
@@ -192,64 +174,38 @@ func AdaptFunc(
consoleCMName,
secretVarsName,
secretPasswordName,
allZitadelUsers,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
desiredKind.Spec.Resources,
migration.GetDoneFunc(monitor, namespaceStr, action),
configuration.GetReadyFunc(monitor, namespaceStr, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName),
setup.GetDoneFunc(monitor, namespaceStr, action),
getConfigurationHashes,
migration.GetDoneFunc(monitor, namespace, action),
configuration.GetReadyFunc(monitor, namespace, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName),
setup.GetDoneFunc(monitor, namespace, action),
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
queryAmbassador, destroyAmbassador, err := ambassador.AdaptFunc(
internalMonitor,
labels.MustForComponent(apiLabels, "apiGateway"),
namespaceStr,
grpcServiceName+"."+namespaceStr+":"+strconv.Itoa(int(grpcPort)),
"http://"+httpServiceName+"."+namespaceStr+":"+strconv.Itoa(int(httpPort)),
"http://"+uiServiceName+"."+namespaceStr,
namespace,
grpcServiceName+"."+namespace+":"+strconv.Itoa(grpcPort),
"http://"+httpServiceName+"."+namespace+":"+strconv.Itoa(httpPort),
"http://"+uiServiceName+"."+namespace,
desiredKind.Spec.Configuration.DNS,
)
if err != nil {
return nil, nil, allSecrets, err
return nil, nil, nil, nil, false, err
}
destroyers := make([]operator.DestroyFunc, 0)
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "migration":
queriers = append(queriers,
queryDB,
//configuration
queryC,
//migration
queryM,
//wait until migration is completed
operator.EnsureFuncToQueryFunc(migration.GetDoneFunc(monitor, namespaceStr, action)),
)
destroyers = append(destroyers,
destroyM,
)
case "iam":
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
queryDB,
//configuration
queryC,
//migration
queryM,
//services
queryS,
querySetup,
queryD,
operator.EnsureFuncToQueryFunc(deployment.GetReadyFunc(monitor, namespaceStr, zitadelDeploymentName)),
queryAmbassador,
)
destroyers = append(destroyers,
destroyAmbassador,
destroyS,
@@ -257,24 +213,85 @@ func AdaptFunc(
destroyD,
destroySetup,
destroyC,
operator.ResourceDestroyToZitadelDestroy(destroyNS),
)
case "scaledown":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespaceStr, zitadelDeploymentName)(0)),
)
case "scaleup":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespaceStr, zitadelDeploymentName)(desiredKind.Spec.ReplicaCount)),
)
}
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
users, err := getAllUsers(k8sClient, desiredKind)
if err != nil {
return nil, err
}
allZitadelUsers, err := getZitadelUserList(k8sClient, desiredKind)
if err != nil {
return nil, err
}
queryReadyM := operator.EnsureFuncToQueryFunc(migration.GetDoneFunc(monitor, namespace, action))
queryC := getQueryC(users)
queryReadyC := operator.EnsureFuncToQueryFunc(configuration.GetReadyFunc(monitor, namespace, secretName, secretVarsName, secretPasswordName, cmName, consoleCMName))
querySetup := getQuerySetup(allZitadelUsers, getConfigurationHashes)
queryReadySetup := operator.EnsureFuncToQueryFunc(setup.GetDoneFunc(monitor, namespace, action))
queryD := queryD(allZitadelUsers, getConfigurationHashes)
queryReadyD := operator.EnsureFuncToQueryFunc(deployment.GetReadyFunc(monitor, namespace, zitadelDeploymentName))
queriers := make([]operator.QueryFunc, 0)
for _, feature := range features {
switch feature {
case "migration":
queriers = append(queriers,
queryDB,
//configuration
queryC,
queryReadyC,
//migration
queryM,
queryReadyM,
operator.EnsureFuncToQueryFunc(migration.GetCleanupFunc(monitor, namespace, action)),
)
case "iam":
queriers = append(queriers,
queryDB,
//configuration
queryC,
queryReadyC,
//migration
queryM,
queryReadyM,
//services
queryS,
//setup
querySetup,
queryReadySetup,
//deployment
queryD,
queryReadyD,
//handle change if necessary for clientID
queryC,
queryReadyC,
//again apply deployment if config changed
queryD,
queryReadyD,
//apply ambassador crds after zitadel is ready
queryAmbassador,
)
case "scaledown":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespace, zitadelDeploymentName)(0)),
)
case "scaleup":
queriers = append(queriers,
operator.EnsureFuncToQueryFunc(deployment.GetScaleFunc(monitor, namespace, zitadelDeploymentName)(desiredKind.Spec.ReplicaCount)),
)
}
}
return operator.QueriersToEnsureFunc(internalMonitor, true, queriers, k8sClient, queried)
},
operator.DestroyersToDestroyFunc(monitor, destroyers),
allSecrets,
allExisting,
false,
nil
}
}


@@ -21,9 +21,9 @@ type ConsoleEnv struct {
}
const (
googleServiceAccountJSONPath = "google-serviceaccount-key.json"
zitadelKeysPath = "zitadel-keys.yaml"
timeout time.Duration = 60
googleServiceAccountJSONPath = "google-serviceaccount-key.json"
zitadelKeysPath = "zitadel-keys.yaml"
timeout = 60 * time.Second
)
func AdaptFunc(
@@ -38,20 +38,21 @@ func AdaptFunc(
consoleCMName string,
secretVarsName string,
secretPasswordName string,
necessaryUsers map[string]string,
dbClient database.Client,
getClientID func() string,
dbClient database.ClientInt,
) (
operator.QueryFunc,
func(
necessaryUsers map[string]string,
) operator.QueryFunc,
operator.DestroyFunc,
func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string,
func(
k8sClient kubernetes.ClientInt,
queried map[string]interface{},
necessaryUsers map[string]string,
) (map[string]string, error),
error,
) {
internalMonitor := monitor.WithField("component", "configuration")
literalsSecret := literalsSecret(desired, googleServiceAccountJSONPath, zitadelKeysPath)
literalsSecretVars := literalsSecretVars(desired)
destroyCM, err := configmap.AdaptFuncToDestroy(namespace, cmName)
if err != nil {
return nil, nil, nil, err
@@ -73,7 +74,7 @@ func AdaptFunc(
return nil, nil, nil, err
}
_, destroyUser, err := users.AdaptFunc(internalMonitor, necessaryUsers, dbClient)
_, destroyUser, err := users.AdaptFunc(internalMonitor, dbClient)
if err != nil {
return nil, nil, nil, err
}
@@ -87,71 +88,97 @@ func AdaptFunc(
operator.ResourceDestroyToZitadelDestroy(destroySP),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queryUser, _, err := users.AdaptFunc(internalMonitor, necessaryUsers, dbClient)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), literalsSecret)
if err != nil {
return nil, err
}
querySV, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretVarsName), literalsSecretVars)
if err != nil {
return nil, err
}
querySP, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretPasswordName), necessaryUsers)
if err != nil {
return nil, err
}
return func(
necessaryUsers map[string]string,
) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
literalsSecret, err := literalsSecret(k8sClient, desired, googleServiceAccountJSONPath, zitadelKeysPath)
if err != nil {
return nil, err
}
literalsSecretVars, err := literalsSecretVars(k8sClient, desired)
if err != nil {
return nil, err
}
queryCCM, err := configmap.AdaptFuncToEnsure(
namespace,
consoleCMName,
labels.MustForNameK8SMap(componentLabels, consoleCMName),
literalsConsoleCM(
getClientID(),
desired.DNS,
k8sClient,
queryUser, _, err := users.AdaptFunc(internalMonitor, dbClient)
if err != nil {
return nil, err
}
queryS, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretName), literalsSecret)
if err != nil {
return nil, err
}
querySV, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretVarsName), literalsSecretVars)
if err != nil {
return nil, err
}
querySP, err := secret.AdaptFuncToEnsure(namespace, labels.MustForName(componentLabels, secretPasswordName), necessaryUsers)
if err != nil {
return nil, err
}
queryCCM, err := configmap.AdaptFuncToEnsure(
namespace,
consoleCMName,
),
)
if err != nil {
return nil, err
}
labels.MustForNameK8SMap(componentLabels, consoleCMName),
literalsConsoleCM(
getClientID(),
desired.DNS,
k8sClient,
namespace,
consoleCMName,
),
)
if err != nil {
return nil, err
}
queryCM, err := configmap.AdaptFuncToEnsure(
namespace,
cmName,
labels.MustForNameK8SMap(componentLabels, cmName),
literalsConfigMap(
desired,
necessaryUsers,
certPath,
secretPath,
googleServiceAccountJSONPath,
zitadelKeysPath,
queried,
),
)
if err != nil {
return nil, err
}
queryCM, err := configmap.AdaptFuncToEnsure(
namespace,
cmName,
labels.MustForNameK8SMap(componentLabels, cmName),
literalsConfigMap(
desired,
necessaryUsers,
certPath,
secretPath,
googleServiceAccountJSONPath,
zitadelKeysPath,
queried,
),
)
if err != nil {
return nil, err
}
queriers := []operator.QueryFunc{
queryUser,
operator.ResourceQueryToZitadelQuery(queryS),
operator.ResourceQueryToZitadelQuery(queryCCM),
operator.ResourceQueryToZitadelQuery(querySV),
operator.ResourceQueryToZitadelQuery(querySP),
operator.ResourceQueryToZitadelQuery(queryCM),
}
queriers := []operator.QueryFunc{
queryUser(necessaryUsers),
operator.ResourceQueryToZitadelQuery(queryS),
operator.ResourceQueryToZitadelQuery(queryCCM),
operator.ResourceQueryToZitadelQuery(querySV),
operator.ResourceQueryToZitadelQuery(querySP),
operator.ResourceQueryToZitadelQuery(queryCM),
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string {
func(
k8sClient kubernetes.ClientInt,
queried map[string]interface{},
necessaryUsers map[string]string,
) (map[string]string, error) {
literalsSecret, err := literalsSecret(k8sClient, desired, googleServiceAccountJSONPath, zitadelKeysPath)
if err != nil {
return nil, err
}
literalsSecretVars, err := literalsSecretVars(k8sClient, desired)
if err != nil {
return nil, err
}
return map[string]string{
secretName: getHash(literalsSecret),
secretVarsName: getHash(literalsSecretVars),
@@ -176,7 +203,7 @@ func AdaptFunc(
consoleCMName,
),
),
}
}, nil
},
nil
}
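The configuration adapter no longer receives necessaryUsers at construction time; it returns a builder so the user list, which is only known after the cluster has been queried, can be supplied later, and the hash function gains the same parameter plus an error return. A skeletal sketch of the currying (all names illustrative):

package configuration

type ensureFunc func() error
type queryFunc func() (ensureFunc, error)

// adapt returns a builder instead of a finished query: callers resolve
// the users at query time and only then materialize the QueryFunc.
func adapt() (func(users map[string]string) queryFunc, error) {
	return func(users map[string]string) queryFunc {
		return func() (ensureFunc, error) {
			_ = users // the real code renders secrets and configmaps from users here
			return func() error { return nil }, nil
		}
	}, nil
}

The call site in the tests above, query := getQuery(users), shows the second stage being applied.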


@@ -38,6 +38,7 @@ func SetConfigMap(
}
func SetSecretVars(
t *testing.T,
k8sClient *kubernetesmock.MockClientInt,
namespace string,
secretVarsName string,
@@ -45,6 +46,8 @@ func SetSecretVars(
desired *Configuration,
) {
literalsSV, err := literalsSecretVars(k8sClient, desired)
assert.NoError(t, err)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -52,7 +55,7 @@ func SetSecretVars(
Labels: labels,
},
Type: "Opaque",
StringData: literalsSecretVars(desired),
StringData: literalsSV,
}).Times(1)
}
func SetConsoleCM(
@@ -76,12 +79,16 @@ func SetConsoleCM(
k8sClient.EXPECT().ApplyConfigmap(consoleCM).Times(1)
}
func SetSecrets(
t *testing.T,
k8sClient *kubernetesmock.MockClientInt,
namespace string,
secretName string,
labels map[string]string,
desired *Configuration,
) {
literalsS, err := literalsSecret(k8sClient, desired, googleServiceAccountJSONPath, zitadelKeysPath)
assert.NoError(t, err)
k8sClient.EXPECT().ApplySecret(&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
@@ -89,7 +96,7 @@ func SetSecrets(
Labels: labels,
},
Type: "Opaque",
StringData: literalsSecret(desired, googleServiceAccountJSONPath, zitadelKeysPath),
StringData: literalsS,
}).Times(1)
}
@@ -114,7 +121,7 @@ func SetSecretPasswords(
func TestConfiguration_Adapt(t *testing.T) {
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{Fields: map[string]interface{}{"component": "configuration"}}
namespace := "test"
@@ -162,6 +169,7 @@ func TestConfiguration_Adapt(t *testing.T) {
zitadelKeysPath)
SetSecretVars(
t,
k8sClient,
namespace,
secretVarsName,
@@ -179,6 +187,7 @@ func TestConfiguration_Adapt(t *testing.T) {
)
SetSecrets(
t,
k8sClient,
namespace,
secretName,
@@ -194,7 +203,7 @@ func TestConfiguration_Adapt(t *testing.T) {
users,
)
query, _, _, err := AdaptFunc(
getQuery, _, _, err := AdaptFunc(
monitor,
componentLabels,
namespace,
@@ -206,12 +215,11 @@ func TestConfiguration_Adapt(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordName,
users,
getClientID,
dbClient,
getClientID,
)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(k8sClient))
@@ -220,7 +228,7 @@ func TestConfiguration_Adapt(t *testing.T) {
func TestConfiguration_AdaptFull(t *testing.T) {
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{Fields: map[string]interface{}{"component": "configuration"}}
namespace := "test2"
@@ -268,6 +276,7 @@ func TestConfiguration_AdaptFull(t *testing.T) {
zitadelKeysPath)
SetSecretVars(
t,
k8sClient,
namespace,
secretVarsName,
@@ -285,6 +294,7 @@ func TestConfiguration_AdaptFull(t *testing.T) {
)
SetSecrets(
t,
k8sClient,
namespace,
secretName,
@@ -300,7 +310,7 @@ func TestConfiguration_AdaptFull(t *testing.T) {
users,
)
query, _, _, err := AdaptFunc(
getQuery, _, _, err := AdaptFunc(
monitor,
componentLabels,
namespace,
@@ -312,12 +322,12 @@ func TestConfiguration_AdaptFull(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordName,
users,
getClientID,
dbClient,
getClientID,
)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(k8sClient))


@@ -28,53 +28,67 @@ type Subdomains struct {
Issuer string `yaml:"issuer"`
}
type Passwords struct {
Migration *secret.Secret `yaml:"migration"`
Management *secret.Secret `yaml:"management"`
Auth *secret.Secret `yaml:"auth"`
Authz *secret.Secret `yaml:"authz"`
Adminapi *secret.Secret `yaml:"adminapi"`
Notification *secret.Secret `yaml:"notification"`
Eventstore *secret.Secret `yaml:"eventstore"`
Queries *secret.Secret `yaml:"queries"`
Migration *secret.Secret `yaml:"migration"`
Management *secret.Secret `yaml:"management"`
Auth *secret.Secret `yaml:"auth"`
Authz *secret.Secret `yaml:"authz"`
Adminapi *secret.Secret `yaml:"adminapi"`
Notification *secret.Secret `yaml:"notification"`
Eventstore *secret.Secret `yaml:"eventstore"`
Queries *secret.Secret `yaml:"queries"`
ExistingMigration *secret.Existing `yaml:"existingMigration"`
ExistingManagement *secret.Existing `yaml:"existingManagement"`
ExistingAuth *secret.Existing `yaml:"existingAuth"`
ExistingAuthz *secret.Existing `yaml:"existingAuthz"`
ExistingAdminapi *secret.Existing `yaml:"existingAdminapi"`
ExistingNotification *secret.Existing `yaml:"existingNotification"`
ExistingEventstore *secret.Existing `yaml:"existingEventstore"`
ExistingQueries *secret.Existing `yaml:"existingQueries"`
}
type Secrets struct {
Keys *secret.Secret `yaml:"keys,omitempty"`
UserVerificationID string `yaml:"userVerificationID,omitempty"`
OTPVerificationID string `yaml:"otpVerificationID,omitempty"`
OIDCKeysID string `yaml:"oidcKeysID,omitempty"`
CookieID string `yaml:"cookieID,omitempty"`
CSRFID string `yaml:"csrfID,omitempty"`
DomainVerificationID string `yaml:"domainVerificationID,omitempty"`
IDPConfigVerificationID string `yaml:"idpConfigVerificationID,omitempty"`
Keys *secret.Secret `yaml:"keys,omitempty"`
ExistingKeys *secret.Existing `yaml:"existingKeys,omitempty"`
UserVerificationID string `yaml:"userVerificationID,omitempty"`
OTPVerificationID string `yaml:"otpVerificationID,omitempty"`
OIDCKeysID string `yaml:"oidcKeysID,omitempty"`
CookieID string `yaml:"cookieID,omitempty"`
CSRFID string `yaml:"csrfID,omitempty"`
DomainVerificationID string `yaml:"domainVerificationID,omitempty"`
IDPConfigVerificationID string `yaml:"idpConfigVerificationID,omitempty"`
}
type Notifications struct {
GoogleChatURL *secret.Secret `yaml:"googleChatURL,omitempty"`
Email *Email `yaml:"email,omitempty"`
Twilio *Twilio `yaml:"twilio,omitempty"`
GoogleChatURL *secret.Secret `yaml:"googleChatURL,omitempty"`
ExistingGoogleChatURL *secret.Existing `yaml:"existingGoogleChatURL,omitempty"`
Email *Email `yaml:"email,omitempty"`
Twilio *Twilio `yaml:"twilio,omitempty"`
}
type Tracing struct {
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ProjectID string `yaml:"projectID,omitempty"`
Fraction string `yaml:"fraction,omitempty"`
Type string `yaml:"type,omitempty"`
ServiceAccountJSON *secret.Secret `yaml:"serviceAccountJSON,omitempty"`
ExistingServiceAccountJSON *secret.Existing `yaml:"existingServiceAccountJSON,omitempty"`
ProjectID string `yaml:"projectID,omitempty"`
Fraction string `yaml:"fraction,omitempty"`
Type string `yaml:"type,omitempty"`
}
type Twilio struct {
SenderName string `yaml:"senderName,omitempty"`
AuthToken *secret.Secret `yaml:"authToken,omitempty"`
SID *secret.Secret `yaml:"sid,omitempty"`
SenderName string `yaml:"senderName,omitempty"`
AuthToken *secret.Secret `yaml:"authToken,omitempty"`
SID *secret.Secret `yaml:"sid,omitempty"`
ExistingAuthToken *secret.Existing `yaml:"existingAuthToken,omitempty"`
ExistingSID *secret.Existing `yaml:"existingSID,omitempty"`
}
type Email struct {
SMTPHost string `yaml:"smtpHost,omitempty"`
SMTPUser string `yaml:"smtpUser,omitempty"`
SenderAddress string `yaml:"senderAddress,omitempty"`
SenderName string `yaml:"senderName,omitempty"`
TLS bool `yaml:"tls,omitempty"`
AppKey *secret.Secret `yaml:"appKey,omitempty"`
SMTPHost string `yaml:"smtpHost,omitempty"`
SMTPUser string `yaml:"smtpUser,omitempty"`
SenderAddress string `yaml:"senderAddress,omitempty"`
SenderName string `yaml:"senderName,omitempty"`
TLS bool `yaml:"tls,omitempty"`
AppKey *secret.Secret `yaml:"appKey,omitempty"`
ExistingAppKey *secret.Existing `yaml:"existingAppKey,omitempty"`
}
type Cache struct {

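Every inline secret in these structs gains an Existing* sibling, so a desired state can point at a secret already present in the cluster instead of embedding its value. A stand-in sketch of the pairing and of how a consumer would prefer whichever side is set; the three Existing fields and their names are an assumption read off the positional fixtures further down:

package configuration

type Secret struct{ Value string }

// Existing references a secret that already lives in the cluster; the
// field names here are assumed, matching the three positional values
// in the test fixtures below.
type Existing struct{ Name, Key, InternalName string }

// resolve prefers the inline value and falls back to the reference.
func resolve(s *Secret, e *Existing, lookup func(name, key string) string) string {
	if s != nil && s.Value != "" {
		return s.Value
	}
	if e != nil && e.Name != "" {
		return lookup(e.Name, e.Key)
	}
	return ""
}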

@@ -5,8 +5,8 @@ import (
"strconv"
"strings"
"github.com/caos/orbos/pkg/helper"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
)
@@ -109,38 +109,62 @@ func literalsConfigMap(
return literalsConfigMap
}
func literalsSecret(desired *Configuration, googleServiceAccountJSONPath, zitadelKeysPath string) map[string]string {
func literalsSecret(k8sClient kubernetes.ClientInt, desired *Configuration, googleServiceAccountJSONPath, zitadelKeysPath string) (map[string]string, error) {
literalsSecret := map[string]string{}
if desired != nil {
if desired.Tracing != nil && desired.Tracing.ServiceAccountJSON != nil {
literalsSecret[googleServiceAccountJSONPath] = desired.Tracing.ServiceAccountJSON.Value
if desired.Tracing != nil && (desired.Tracing.ServiceAccountJSON != nil || desired.Tracing.ExistingServiceAccountJSON != nil) {
value, err := helper.GetSecretValue(k8sClient, desired.Tracing.ServiceAccountJSON, desired.Tracing.ExistingServiceAccountJSON)
if err != nil {
return nil, err
}
literalsSecret[googleServiceAccountJSONPath] = value
}
if desired.Secrets != nil && desired.Secrets.Keys != nil {
literalsSecret[zitadelKeysPath] = desired.Secrets.Keys.Value
if desired.Secrets != nil && (desired.Secrets.Keys != nil || desired.Secrets.ExistingKeys != nil) {
value, err := helper.GetSecretValue(k8sClient, desired.Secrets.Keys, desired.Secrets.ExistingKeys)
if err != nil {
return nil, err
}
literalsSecret[zitadelKeysPath] = value
}
}
return literalsSecret
return literalsSecret, nil
}
func literalsSecretVars(desired *Configuration) map[string]string {
func literalsSecretVars(k8sClient kubernetes.ClientInt, desired *Configuration) (map[string]string, error) {
literalsSecretVars := map[string]string{}
if desired != nil {
if desired.Notifications != nil {
if desired.Notifications.Email.AppKey != nil {
literalsSecretVars["ZITADEL_EMAILAPPKEY"] = desired.Notifications.Email.AppKey.Value
if desired.Notifications.Email.AppKey != nil || desired.Notifications.Email.ExistingAppKey != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.Email.AppKey, desired.Notifications.Email.ExistingAppKey)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_EMAILAPPKEY"] = value
}
if desired.Notifications.GoogleChatURL != nil {
literalsSecretVars["ZITADEL_GOOGLE_CHAT_URL"] = desired.Notifications.GoogleChatURL.Value
if desired.Notifications.GoogleChatURL != nil || desired.Notifications.ExistingGoogleChatURL != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.GoogleChatURL, desired.Notifications.ExistingGoogleChatURL)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_GOOGLE_CHAT_URL"] = value
}
if desired.Notifications.Twilio.AuthToken != nil {
literalsSecretVars["ZITADEL_TWILIO_AUTH_TOKEN"] = desired.Notifications.Twilio.AuthToken.Value
if desired.Notifications.Twilio.AuthToken != nil || desired.Notifications.Twilio.ExistingAuthToken != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.Twilio.AuthToken, desired.Notifications.Twilio.ExistingAuthToken)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_TWILIO_AUTH_TOKEN"] = value
}
if desired.Notifications.Twilio.SID != nil {
literalsSecretVars["ZITADEL_TWILIO_SID"] = desired.Notifications.Twilio.SID.Value
if desired.Notifications.Twilio.SID != nil || desired.Notifications.Twilio.ExistingSID != nil {
value, err := helper.GetSecretValue(k8sClient, desired.Notifications.Twilio.SID, desired.Notifications.Twilio.ExistingSID)
if err != nil {
return nil, err
}
literalsSecretVars["ZITADEL_TWILIO_SID"] = value
}
}
}
return literalsSecretVars
return literalsSecretVars, nil
}
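The pattern above repeats for every secret: prefer the inline secret.Secret value, otherwise resolve the referenced Kubernetes secret through the client. A minimal sketch of what a resolver like helper.GetSecretValue plausibly does, based only on how it is called here; the hardcoded namespace and the error handling are assumptions, the real orbos helper may differ:

package configuration

import (
	"errors"

	"github.com/caos/orbos/pkg/kubernetes"
	"github.com/caos/orbos/pkg/secret"
)

// getSecretValueSketch prefers the inline secret value and falls back to
// reading the referenced Kubernetes secret through the client. Sketch only;
// the real helper.GetSecretValue may resolve namespaces and defaults differently.
func getSecretValueSketch(k8sClient kubernetes.ClientInt, s *secret.Secret, existing *secret.Existing) (string, error) {
	if s != nil && s.Value != "" {
		return s.Value, nil
	}
	if existing != nil && existing.Name != "" {
		k8sSecret, err := k8sClient.GetSecret("caos-system", existing.Name) // namespace assumed, as in the tests
		if err != nil {
			return "", err
		}
		value, ok := k8sSecret.Data[existing.Key]
		if !ok {
			return "", errors.New("key " + existing.Key + " not found in secret " + existing.Name)
		}
		return string(value), nil
	}
	return "", nil // neither variant configured
}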
func literalsConsoleCM(

View File

@@ -127,6 +127,62 @@ var (
},
ClusterDNS: "cluster",
}
desiredFullExisting = &Configuration{
Tracing: &Tracing{
ExistingServiceAccountJSON: &secret.Existing{"sajson", "sajson", "sajson"},
ProjectID: "projectid",
Fraction: "fraction",
Type: "type",
},
Secrets: &Secrets{
ExistingKeys: &secret.Existing{"keys", "keys", "keys"},
UserVerificationID: "userid",
OTPVerificationID: "otpid",
OIDCKeysID: "oidcid",
CookieID: "cookieid",
CSRFID: "csrfid",
DomainVerificationID: "domainid",
IDPConfigVerificationID: "idpid",
},
Notifications: &Notifications{
ExistingGoogleChatURL: &secret.Existing{"chat", "chat", "chat"},
Email: &Email{
SMTPHost: "smtphost",
SMTPUser: "smtpuser",
SenderAddress: "sender",
SenderName: "sendername",
TLS: true,
ExistingAppKey: &secret.Existing{"appkey", "appkey", "appkey"},
},
Twilio: &Twilio{
SenderName: "sendername",
ExistingAuthToken: &secret.Existing{"migration", "migration", "migration"},
ExistingSID: &secret.Existing{"sid", "sid", "sid"},
},
},
Passwords: &Passwords{
ExistingMigration: &secret.Existing{"migration", "migration", "migration"},
ExistingEventstore: &secret.Existing{"eventstore", "eventstore", "eventstore"},
ExistingNotification: &secret.Existing{"notification", "notification", "notification"},
ExistingAuthz: &secret.Existing{"authz", "authz", "authz"},
ExistingAuth: &secret.Existing{"auth", "auth", "auth"},
ExistingAdminapi: &secret.Existing{"adminapi", "adminapi", "adminapi"},
ExistingManagement: &secret.Existing{"management", "management", "management"},
},
DebugMode: true,
LogLevel: "debug",
DNS: &DNS{
Domain: "domain",
TlsSecret: "tls",
Subdomains: &Subdomains{
Accounts: "accounts",
API: "api",
Console: "console",
Issuer: "issuer",
},
},
ClusterDNS: "cluster",
}
)
func TestConfiguration_LiteralsConfigMap(t *testing.T) {
@@ -285,6 +341,7 @@ func TestConfiguration_LiteralsConfigMapFull(t *testing.T) {
}
func TestConfiguration_LiteralsSecrets(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
googleSA := "sajson"
zitadelKeyPath := "zitadel"
@@ -293,11 +350,13 @@ func TestConfiguration_LiteralsSecrets(t *testing.T) {
zitadelKeyPath: "",
}
literals := literalsSecret(desiredEmpty, googleSA, zitadelKeyPath)
literals, err := literalsSecret(client, desiredEmpty, googleSA, zitadelKeyPath)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
func TestConfiguration_LiteralsSecretsFull(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
googleSA := "sajson"
zitadelKeyPath := "zitadel"
@@ -306,31 +365,123 @@ func TestConfiguration_LiteralsSecretsFull(t *testing.T) {
zitadelKeyPath: "keys",
}
literals := literalsSecret(desiredFull, googleSA, zitadelKeyPath)
literals, err := literalsSecret(client, desiredFull, googleSA, zitadelKeyPath)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
func TestConfiguration_LiteralsSecretsExisting(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
sajson := "sajson"
keys := "keys"
namespace := "caos-system"
client.EXPECT().GetSecret(namespace, desiredFullExisting.Tracing.ExistingServiceAccountJSON.Name).Return(&corev1.Secret{
StringData: map[string]string{
desiredFullExisting.Tracing.ExistingServiceAccountJSON.Key: sajson,
},
Data: map[string][]byte{
desiredFullExisting.Tracing.ExistingServiceAccountJSON.Key: []byte(sajson),
},
}, nil)
client.EXPECT().GetSecret(namespace, desiredFullExisting.Secrets.ExistingKeys.Name).Return(&corev1.Secret{
StringData: map[string]string{
desiredFullExisting.Secrets.ExistingKeys.Key: keys,
},
Data: map[string][]byte{
desiredFullExisting.Secrets.ExistingKeys.Key: []byte(keys),
},
}, nil)
googleSA := "sajson"
zitadelKeyPath := "zitadel"
equals := map[string]string{
googleSA: sajson,
zitadelKeyPath: keys,
}
literals, err := literalsSecret(client, desiredFullExisting, googleSA, zitadelKeyPath)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
func TestConfiguration_LiteralsSecretVars(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
equals := map[string]string{
"ZITADEL_EMAILAPPKEY": "",
"ZITADEL_GOOGLE_CHAT_URL": "",
"ZITADEL_TWILIO_AUTH_TOKEN": "",
"ZITADEL_TWILIO_SID": "",
}
literals := literalsSecretVars(desiredEmpty)
literals, err := literalsSecretVars(client, desiredEmpty)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
func TestConfiguration_LiteralsSecretVarsFull(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
equals := map[string]string{
"ZITADEL_EMAILAPPKEY": "appkey",
"ZITADEL_GOOGLE_CHAT_URL": "chat",
"ZITADEL_TWILIO_AUTH_TOKEN": "authtoken",
"ZITADEL_TWILIO_SID": "sid",
}
literals := literalsSecretVars(desiredFull)
literals, err := literalsSecretVars(client, desiredFull)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}
func TestConfiguration_LiteralsSecretVarsExisting(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
namespace := "caos-system"
appkey := "appkey"
chat := "chat"
authtoken := "authtoken"
sid := "sid"
client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.Email.ExistingAppKey.Name).Return(&corev1.Secret{
StringData: map[string]string{
desiredFullExisting.Notifications.Email.ExistingAppKey.Key: appkey,
},
Data: map[string][]byte{
desiredFullExisting.Notifications.Email.ExistingAppKey.Key: []byte(appkey),
},
}, nil)
client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.ExistingGoogleChatURL.Name).Return(&corev1.Secret{
StringData: map[string]string{
desiredFullExisting.Notifications.ExistingGoogleChatURL.Key: chat,
},
Data: map[string][]byte{
desiredFullExisting.Notifications.ExistingGoogleChatURL.Key: []byte(chat),
},
}, nil)
client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.Twilio.ExistingAuthToken.Name).Return(&corev1.Secret{
StringData: map[string]string{
desiredFullExisting.Notifications.Twilio.ExistingAuthToken.Key: authtoken,
},
Data: map[string][]byte{
desiredFullExisting.Notifications.Twilio.ExistingAuthToken.Key: []byte(authtoken),
},
}, nil)
client.EXPECT().GetSecret(namespace, desiredFullExisting.Notifications.Twilio.ExistingSID.Name).Return(&corev1.Secret{
StringData: map[string]string{
desiredFullExisting.Notifications.Twilio.ExistingSID.Key: sid,
},
Data: map[string][]byte{
desiredFullExisting.Notifications.Twilio.ExistingSID.Key: []byte(sid),
},
}, nil)
equals := map[string]string{
"ZITADEL_EMAILAPPKEY": appkey,
"ZITADEL_GOOGLE_CHAT_URL": chat,
"ZITADEL_TWILIO_AUTH_TOKEN": authtoken,
"ZITADEL_TWILIO_SID": sid,
}
literals, err := literalsSecretVars(client, desiredFullExisting)
assert.NoError(t, err)
assert.EqualValues(t, equals, literals)
}

View File

@@ -9,14 +9,14 @@ import (
func AdaptFunc(
monitor mntr.Monitor,
users map[string]string,
dbClient database.ClientInt,
dbClient database.Client,
) (
operator.QueryFunc,
func(users map[string]string) operator.QueryFunc,
operator.DestroyFunc,
error,
) {
internalMonitor := monitor.WithField("component", "db-users")
destroyers := make([]operator.DestroyFunc, 0)
destroyers = append(destroyers, func(k8sClient kubernetes.ClientInt) error {
@@ -32,35 +32,39 @@ func AdaptFunc(
return nil
})
usernames := []string{}
for username := range users {
usernames = append(usernames, username)
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queriers := make([]operator.QueryFunc, 0)
db, err := database.GetDatabaseInQueried(queried)
if err != nil {
return nil, err
}
for _, username := range usernames {
ensure := createIfNecessary(monitor, username, db.Users, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
return func(users map[string]string) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
queriers := make([]operator.QueryFunc, 0)
db, err := database.GetDatabaseInQueried(queried)
if err != nil {
return nil, err
}
}
for _, listedUser := range db.Users {
ensure := deleteIfNotRequired(monitor, listedUser, usernames, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
}
}
if queriers == nil || len(queriers) == 0 {
return func(k8sClient kubernetes.ClientInt) error { return nil }, nil
usernames := []string{}
for username := range users {
usernames = append(usernames, username)
}
for _, username := range usernames {
ensure := createIfNecessary(monitor, username, db.Users, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
}
}
for _, listedUser := range db.Users {
ensure := deleteIfNotRequired(monitor, listedUser, usernames, dbClient)
if ensure != nil {
queriers = append(queriers, operator.EnsureFuncToQueryFunc(ensure))
}
}
if queriers == nil || len(queriers) == 0 {
return func(k8sClient kubernetes.ClientInt) error { return nil }, nil
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}, operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
nil
}
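The users AdaptFunc is now curried: the static dependencies are bound once, while the concrete user map is injected later, when it has actually been computed at query time (the tests in the next file exercise exactly this shape). A sketch of the call pattern; the users import path and the surrounding names are assumptions:

package zitadel

import (
	"github.com/caos/orbos/mntr"
	"github.com/caos/orbos/pkg/kubernetes"
	"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
	users "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/users" // import path assumed
)

// wireUsers sketches the two-stage call: dependencies first, user map later.
func wireUsers(
	monitor mntr.Monitor,
	dbClient database.Client,
	k8sClient kubernetes.ClientInt,
	necessaryUsers map[string]string,
	queried map[string]interface{},
) error {
	getQuery, _, err := users.AdaptFunc(monitor, dbClient)
	if err != nil {
		return err
	}
	query := getQuery(necessaryUsers)        // user map injected late
	ensure, err := query(k8sClient, queried) // plan against the queried database state
	if err != nil {
		return err
	}
	return ensure(k8sClient) // create missing users, delete superfluous ones
}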

View File

@@ -1,19 +1,20 @@
package users
import (
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
databasemock "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestUsers_Adapt_CreateFirst(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@@ -24,8 +25,9 @@ func TestUsers_Adapt_CreateFirst(t *testing.T) {
})
dbClient.EXPECT().AddUser(monitor, "test", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
assert.NoError(t, err)
err = ensure(client)
@@ -35,7 +37,7 @@ func TestUsers_Adapt_CreateFirst(t *testing.T) {
func TestUsers_Adapt_DoNothing(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@@ -45,8 +47,9 @@ func TestUsers_Adapt_DoNothing(t *testing.T) {
Users: []string{"test"},
})
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NotNil(t, ensure)
@@ -56,7 +59,7 @@ func TestUsers_Adapt_DoNothing(t *testing.T) {
func TestUsers_Adapt_Add(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw", "test2": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@@ -67,8 +70,9 @@ func TestUsers_Adapt_Add(t *testing.T) {
})
dbClient.EXPECT().AddUser(monitor, "test2", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
assert.NoError(t, err)
err = ensure(client)
@@ -78,7 +82,7 @@ func TestUsers_Adapt_Add(t *testing.T) {
func TestUsers_Adapt_Delete(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{"test": "testpw", "test2": "testpw"}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@@ -90,8 +94,9 @@ func TestUsers_Adapt_Delete(t *testing.T) {
dbClient.EXPECT().DeleteUser(monitor, "test3", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
err = ensure(client)
assert.NoError(t, err)
@@ -100,7 +105,7 @@ func TestUsers_Adapt_Delete(t *testing.T) {
func TestUsers_Adapt_DeleteMultiple(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
users := map[string]string{}
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
monitor := mntr.Monitor{}
queried := map[string]interface{}{}
@@ -114,8 +119,9 @@ func TestUsers_Adapt_DeleteMultiple(t *testing.T) {
dbClient.EXPECT().DeleteUser(monitor, "test2", client)
dbClient.EXPECT().DeleteUser(monitor, "test3", client)
query, _, err := AdaptFunc(monitor, users, dbClient)
getQuery, _, err := AdaptFunc(monitor, dbClient)
assert.NoError(t, err)
query := getQuery(users)
ensure, err := query(client, queried)
err = ensure(client)
assert.NoError(t, err)

View File

@@ -7,7 +7,7 @@ import (
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
)
func createIfNecessary(monitor mntr.Monitor, user string, list []string, dbClient database.ClientInt) operator.EnsureFunc {
func createIfNecessary(monitor mntr.Monitor, user string, list []string, dbClient database.Client) operator.EnsureFunc {
existing := false
for _, listedUser := range list {
if listedUser == user {
@@ -23,7 +23,7 @@ func createIfNecessary(monitor mntr.Monitor, user string, list []string, dbClien
return nil
}
func deleteIfNotRequired(monitor mntr.Monitor, listedUser string, list []string, dbClient database.ClientInt) operator.EnsureFunc {
func deleteIfNotRequired(monitor mntr.Monitor, listedUser string, list []string, dbClient database.Client) operator.EnsureFunc {
required := false
for _, user := range list {
if user == listedUser {

View File

@@ -1,19 +1,20 @@
package users
import (
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
databasemock "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestUsers_CreateIfNecessary(t *testing.T) {
users := []string{}
monitor := mntr.Monitor{}
user := "test"
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient.EXPECT().AddUser(monitor, user, k8sClient)
@@ -38,7 +39,7 @@ func TestUsers_DeleteIfNotRequired(t *testing.T) {
users := []string{}
monitor := mntr.Monitor{}
user := "test"
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
dbClient.EXPECT().DeleteUser(monitor, user, k8sClient)

View File

@@ -8,7 +8,7 @@ import (
func AdaptFunc(
monitor mntr.Monitor,
dbClient ClientInt,
dbClient Client,
) (
operator.QueryFunc,
error,

View File

@@ -2,16 +2,17 @@ package database
import (
"errors"
"testing"
"github.com/caos/orbos/mntr"
kubernetesmock "github.com/caos/orbos/pkg/kubernetes/mock"
databasemock "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database/mock"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"testing"
)
func TestDatabase_Adapt(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
host := "host"
port := "port"
@@ -41,7 +42,7 @@ func TestDatabase_Adapt(t *testing.T) {
}
func TestDatabase_Adapt2(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
host := "host2"
port := "port2"
@@ -71,7 +72,7 @@ func TestDatabase_Adapt2(t *testing.T) {
}
func TestDatabase_AdaptFailConnection(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
monitor := mntr.Monitor{}
@@ -95,7 +96,7 @@ func TestDatabase_AdaptFailConnection(t *testing.T) {
}
func TestDatabase_AdaptFailUsers(t *testing.T) {
dbClient := databasemock.NewMockClientInt(gomock.NewController(t))
dbClient := databasemock.NewMockClient(gomock.NewController(t))
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
host := "host"
port := "port"

View File

@@ -9,27 +9,28 @@ import (
"github.com/caos/zitadel/pkg/databases"
)
var _ ClientInt = (*Client)(nil)
var _ Client = (*GitOpsClient)(nil)
var _ Client = (*CrdClient)(nil)
type ClientInt interface {
type Client interface {
GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error)
DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error
AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error
ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error)
}
type Client struct {
type GitOpsClient struct {
Monitor mntr.Monitor
gitClient *git.Client
}
func NewClient(monitor mntr.Monitor, repoURL, repoKey string) (*Client, error) {
func NewGitOpsClient(monitor mntr.Monitor, repoURL, repoKey string) (*GitOpsClient, error) {
gitClient, err := newGit(monitor, repoURL, repoKey)
if err != nil {
return nil, err
}
return &Client{
return &GitOpsClient{
Monitor: monitor,
gitClient: gitClient,
}, nil
@@ -47,10 +48,27 @@ func newGit(monitor mntr.Monitor, repoURL string, repoKey string) (*git.Client,
return gitClient, nil
}
func (c *Client) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
return databases.GetConnectionInfo(
func (c *GitOpsClient) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
return databases.GitOpsGetConnectionInfo(
monitor,
k8sClient,
c.gitClient,
)
}
type CrdClient struct {
Monitor mntr.Monitor
}
func NewCrdClient(monitor mntr.Monitor) *CrdClient {
return &CrdClient{
Monitor: monitor,
}
}
func (c *CrdClient) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
return databases.CrdGetConnectionInfo(
monitor,
k8sClient,
)
}
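With the rename, Client is the interface and GitOpsClient and CrdClient are its two implementations. A hypothetical selector illustrating the intended split; the operator's real wiring is not part of this diff:

package zitadel

import (
	"github.com/caos/orbos/mntr"
	"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
)

// newDatabaseClient is a hypothetical selector: in GitOps mode the client
// resolves database information through the git repository, in CRD mode it
// reads the in-cluster resources instead.
func newDatabaseClient(monitor mntr.Monitor, gitops bool, repoURL, repoKey string) (database.Client, error) {
	if gitops {
		return database.NewGitOpsClient(monitor, repoURL, repoKey)
	}
	return database.NewCrdClient(monitor), nil
}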

View File

@@ -11,31 +11,31 @@ import (
reflect "reflect"
)
// MockClientInt is a mock of ClientInt interface
type MockClientInt struct {
// MockClient is a mock of Client interface
type MockClient struct {
ctrl *gomock.Controller
recorder *MockClientIntMockRecorder
recorder *MockClientMockRecorder
}
// MockClientIntMockRecorder is the mock recorder for MockClientInt
type MockClientIntMockRecorder struct {
mock *MockClientInt
// MockClientMockRecorder is the mock recorder for MockClient
type MockClientMockRecorder struct {
mock *MockClient
}
// NewMockClientInt creates a new mock instance
func NewMockClientInt(ctrl *gomock.Controller) *MockClientInt {
mock := &MockClientInt{ctrl: ctrl}
mock.recorder = &MockClientIntMockRecorder{mock}
// NewMockClient creates a new mock instance
func NewMockClient(ctrl *gomock.Controller) *MockClient {
mock := &MockClient{ctrl: ctrl}
mock.recorder = &MockClientMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockClientInt) EXPECT() *MockClientIntMockRecorder {
func (m *MockClient) EXPECT() *MockClientMockRecorder {
return m.recorder
}
// GetConnectionInfo mocks base method
func (m *MockClientInt) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
func (m *MockClient) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) (string, string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "GetConnectionInfo", monitor, k8sClient)
ret0, _ := ret[0].(string)
@@ -45,13 +45,13 @@ func (m *MockClientInt) GetConnectionInfo(monitor mntr.Monitor, k8sClient kubern
}
// GetConnectionInfo indicates an expected call of GetConnectionInfo
func (mr *MockClientIntMockRecorder) GetConnectionInfo(monitor, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) GetConnectionInfo(monitor, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionInfo", reflect.TypeOf((*MockClientInt)(nil).GetConnectionInfo), monitor, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnectionInfo", reflect.TypeOf((*MockClient)(nil).GetConnectionInfo), monitor, k8sClient)
}
// DeleteUser mocks base method
func (m *MockClientInt) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
func (m *MockClient) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteUser", monitor, user, k8sClient)
ret0, _ := ret[0].(error)
@@ -59,13 +59,13 @@ func (m *MockClientInt) DeleteUser(monitor mntr.Monitor, user string, k8sClient
}
// DeleteUser indicates an expected call of DeleteUser
func (mr *MockClientIntMockRecorder) DeleteUser(monitor, user, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) DeleteUser(monitor, user, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockClientInt)(nil).DeleteUser), monitor, user, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUser", reflect.TypeOf((*MockClient)(nil).DeleteUser), monitor, user, k8sClient)
}
// AddUser mocks base method
func (m *MockClientInt) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
func (m *MockClient) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddUser", monitor, user, k8sClient)
ret0, _ := ret[0].(error)
@@ -73,13 +73,13 @@ func (m *MockClientInt) AddUser(monitor mntr.Monitor, user string, k8sClient kub
}
// AddUser indicates an expected call of AddUser
func (mr *MockClientIntMockRecorder) AddUser(monitor, user, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) AddUser(monitor, user, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUser", reflect.TypeOf((*MockClientInt)(nil).AddUser), monitor, user, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUser", reflect.TypeOf((*MockClient)(nil).AddUser), monitor, user, k8sClient)
}
// ListUsers mocks base method
func (m *MockClientInt) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
func (m *MockClient) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ListUsers", monitor, k8sClient)
ret0, _ := ret[0].([]string)
@@ -88,7 +88,7 @@ func (m *MockClientInt) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.Cli
}
// ListUsers indicates an expected call of ListUsers
func (mr *MockClientIntMockRecorder) ListUsers(monitor, k8sClient interface{}) *gomock.Call {
func (mr *MockClientMockRecorder) ListUsers(monitor, k8sClient interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockClientInt)(nil).ListUsers), monitor, k8sClient)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListUsers", reflect.TypeOf((*MockClient)(nil).ListUsers), monitor, k8sClient)
}

View File

@@ -6,8 +6,8 @@ import (
"github.com/caos/zitadel/pkg/databases"
)
func (c *Client) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.DeleteUser(
func (c *GitOpsClient) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.GitOpsDeleteUser(
monitor,
user,
k8sClient,
@@ -15,8 +15,8 @@ func (c *Client) DeleteUser(monitor mntr.Monitor, user string, k8sClient kuberne
)
}
func (c *Client) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.AddUser(
func (c *GitOpsClient) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.GitOpsAddUser(
monitor,
user,
k8sClient,
@@ -24,10 +24,33 @@ func (c *Client) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes
)
}
func (c *Client) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
return databases.ListUsers(
func (c *GitOpsClient) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
return databases.GitOpsListUsers(
monitor,
k8sClient,
c.gitClient,
)
}
func (c *CrdClient) DeleteUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.CrdDeleteUser(
monitor,
user,
k8sClient,
)
}
func (c *CrdClient) AddUser(monitor mntr.Monitor, user string, k8sClient kubernetes.ClientInt) error {
return databases.CrdAddUser(
monitor,
user,
k8sClient,
)
}
func (c *CrdClient) ListUsers(monitor mntr.Monitor, k8sClient kubernetes.ClientInt) ([]string, error) {
return databases.CrdListUsers(
monitor,
k8sClient,
)
}

View File

@@ -21,8 +21,8 @@ const (
containerName = "zitadel"
RunAsUser = int64(1000)
//zitadelImage can be found in github.com/caos/zitadel repo
zitadelImage = "ghcr.io/caos/zitadel"
timeout time.Duration = 60
zitadelImage = "ghcr.io/caos/zitadel"
timeout = 60 * time.Second
)
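The timeout fix matters because the old declaration typed the untyped constant 60 as a time.Duration of 60 nanoseconds, not 60 seconds. A runnable illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The old constant: an untyped 60 converted to time.Duration is
	// 60 nanoseconds, not 60 seconds.
	const wrong time.Duration = 60
	const right = 60 * time.Second

	fmt.Println(wrong) // 60ns
	fmt.Println(right) // 1m0s
}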
func AdaptFunc(
@@ -41,16 +41,17 @@ func AdaptFunc(
consoleCMName string,
secretVarsName string,
secretPasswordsName string,
users []string,
nodeSelector map[string]string,
tolerations []corev1.Toleration,
resources *k8s.Resources,
migrationDone operator.EnsureFunc,
configurationDone operator.EnsureFunc,
setupDone operator.EnsureFunc,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string,
) (
operator.QueryFunc,
func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc,
operator.DestroyFunc,
error,
) {
@@ -64,52 +65,65 @@ func AdaptFunc(
operator.ResourceDestroyToZitadelDestroy(destroy),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
deploymentDef := deploymentDef(
nameLabels,
namespace,
replicaCount,
podSelector,
nodeSelector,
tolerations,
affinity,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
)
hashes := getConfigurationHashes(k8sClient, queried)
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
deploymentDef.Annotations[k] = v
deploymentDef.Spec.Template.Annotations[k] = v
return func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
users := make([]string, 0)
for user := range necessaryUsers {
users = append(users, user)
}
}
query, err := deployment.AdaptFuncToEnsure(deploymentDef, force)
if err != nil {
return nil, err
}
deploymentDef := deploymentDef(
nameLabels,
namespace,
replicaCount,
podSelector,
nodeSelector,
tolerations,
affinity,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
)
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(migrationDone),
operator.EnsureFuncToQueryFunc(configurationDone),
operator.EnsureFuncToQueryFunc(setupDone),
operator.ResourceQueryToZitadelQuery(query),
}
hashes, err := getConfigurationHashes(k8sClient, queried, necessaryUsers)
if err != nil {
return nil, err
}
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
deploymentDef.Annotations[k] = v
deploymentDef.Spec.Template.Annotations[k] = v
}
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
query, err := deployment.AdaptFuncToEnsure(deploymentDef, force)
if err != nil {
return nil, err
}
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(migrationDone),
operator.EnsureFuncToQueryFunc(configurationDone),
operator.EnsureFuncToQueryFunc(setupDone),
operator.ResourceQueryToZitadelQuery(query),
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
nil
}
func deploymentDef(nameLabels *labels.Name, namespace string, replicaCount int, podSelector *labels.Selector, nodeSelector map[string]string, tolerations []corev1.Toleration, affinity *k8s.Affinity, users []string, version *string, resources *k8s.Resources, cmName string, certPath string, secretName string, secretPath string, consoleCMName string, secretVarsName string, secretPasswordsName string) *appsv1.Deployment {

View File

@@ -32,7 +32,11 @@ func TestDeployment_Adapt(t *testing.T) {
secretName := "testSecret"
consoleCMName := "testConsoleCM"
cmName := "testCM"
users := []string{"test"}
usersMap := map[string]string{"test": "test"}
users := []string{}
for _, user := range usersMap {
users = append(users, user)
}
annotations := map[string]string{"testHash": "test"}
k8sClient := kubernetesmock.NewMockClientInt(gomock.NewController(t))
@@ -115,8 +119,8 @@ func TestDeployment_Adapt(t *testing.T) {
}
k8sClient.EXPECT().ApplyDeployment(deploymentDef, false).Times(1)
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string {
return map[string]string{"testHash": "test"}
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error) {
return map[string]string{"testHash": "test"}, nil
}
migrationDone := func(k8sClient kubernetes.ClientInt) error {
return nil
@@ -128,7 +132,7 @@ func TestDeployment_Adapt(t *testing.T) {
return nil
}
query, _, err := AdaptFunc(
getQuery, _, err := AdaptFunc(
monitor,
mocklabels.Name,
mocklabels.ClosedNameSelector,
@@ -144,17 +148,16 @@ func TestDeployment_Adapt(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordsName,
users,
nodeSelector,
nil,
resources,
migrationDone,
configurationDone,
setupDone,
getConfigurationHashes,
)
assert.NoError(t, err)
queried := map[string]interface{}{}
query := getQuery(usersMap, getConfigurationHashes)
ensure, err := query(k8sClient, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(k8sClient))

View File

@@ -1,6 +1,7 @@
package deployment
import (
"sort"
"strings"
"github.com/caos/orbos/pkg/kubernetes/k8s"
@@ -63,6 +64,7 @@ func GetContainer(
}},
}
sort.Strings(users)
for _, user := range users {
envVars = append(envVars, corev1.EnvVar{
Name: "CR_" + strings.ToUpper(user) + "_PASSWORD",
@@ -92,7 +94,7 @@ func GetContainer(
},
Name: containerName,
Image: zitadelImage + ":" + version,
ImagePullPolicy: "IfNotPresent",
ImagePullPolicy: corev1.PullIfNotPresent,
Ports: []corev1.ContainerPort{
{Name: "grpc", ContainerPort: 50001},
{Name: "http", ContainerPort: 50002},
@@ -127,5 +129,7 @@ func GetContainer(
PeriodSeconds: 5,
FailureThreshold: 2,
},
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
}
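The added sort.Strings call is what makes the rendered manifest deterministic: Go randomizes map iteration order, so env vars derived from an unsorted user list would reorder on every reconciliation and show up as spurious deployment diffs. A small standalone illustration:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	users := map[string]string{"management": "", "auth": "", "adminapi": ""}

	// Collect the keys: map iteration order is deliberately randomized in Go,
	// so this slice comes back in a different order on different runs.
	names := make([]string, 0, len(users))
	for user := range users {
		names = append(names, user)
	}

	// Sorting pins the order, so the generated env vars (and therefore the
	// rendered Deployment) are identical across reconciliations.
	sort.Strings(names)
	for _, user := range names {
		fmt.Println("CR_" + strings.ToUpper(user) + "_PASSWORD")
	}
}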

View File

@@ -95,7 +95,7 @@ func TestDeployment_GetContainer(t *testing.T) {
},
Name: containerName,
Image: zitadelImage + ":" + version,
ImagePullPolicy: "IfNotPresent",
ImagePullPolicy: corev1.PullIfNotPresent,
Ports: []corev1.ContainerPort{
{Name: "grpc", ContainerPort: 50001},
{Name: "http", ContainerPort: 50002},
@@ -130,6 +130,8 @@ func TestDeployment_GetContainer(t *testing.T) {
PeriodSeconds: 5,
FailureThreshold: 2,
},
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
container := GetContainer(

View File

@@ -2,6 +2,7 @@ package deployment
import (
corev1 "k8s.io/api/core/v1"
"sort"
"strconv"
"strings"
)
@@ -24,6 +25,8 @@ func GetInitContainer(
}
copySecrets := append([]string{}, "cp "+certMountPath+"/client_root/ca.crt "+certTempMountPath+"/ca.crt")
sort.Strings(users)
for _, user := range users {
userReplaced := strings.ReplaceAll(user, "_", "-")
internalName := "client-" + userReplaced
@@ -43,10 +46,13 @@ func GetInitContainer(
)
return corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: corev1.PullIfNotPresent,
}
}

View File

@@ -25,11 +25,14 @@ func TestDeployment_GetInitContainer(t *testing.T) {
}
equals := corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
ImagePullPolicy: corev1.PullIfNotPresent,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
init := GetInitContainer(rootSecret, dbSecrets, users, RunAsUser)
@@ -55,11 +58,14 @@ func TestDeployment_GetInitContainer1(t *testing.T) {
}
equals := corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: corev1.PullIfNotPresent,
}
init := GetInitContainer(rootSecret, dbSecrets, users, RunAsUser)
@@ -88,11 +94,14 @@ func TestDeployment_GetInitContainer2(t *testing.T) {
}
equals := corev1.Container{
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
Name: "fix-permissions",
Image: "alpine:3.11",
Command: []string{"/bin/sh", "-c"},
Args: []string{strings.Join(initCommands, " && ")},
VolumeMounts: initVolumeMounts,
ImagePullPolicy: corev1.PullIfNotPresent,
TerminationMessagePolicy: "File",
TerminationMessagePath: "/dev/termination-log",
}
init := GetInitContainer(rootSecret, dbSecrets, users, RunAsUser)

View File

@@ -1,6 +1,8 @@
package deployment
import (
"time"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/labels"
@@ -11,7 +13,7 @@ import (
func GetReadyFunc(monitor mntr.Monitor, namespace string, name *labels.Name) operator.EnsureFunc {
return func(k8sClient kubernetes.ClientInt) error {
monitor.Info("waiting for deployment to be ready")
if err := k8sClient.WaitUntilDeploymentReady(namespace, name.Name(), true, true, 60); err != nil {
if err := k8sClient.WaitUntilDeploymentReady(namespace, name.Name(), true, true, 60*time.Second); err != nil {
monitor.Error(errors.Wrap(err, "error while waiting for deployment to be ready"))
return err
}

View File

@@ -3,6 +3,7 @@ package deployment
import (
"github.com/caos/zitadel/operator/helpers"
corev1 "k8s.io/api/core/v1"
"sort"
"strings"
)
@@ -16,7 +17,8 @@ func GetVolumes(
Name: secretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
SecretName: secretName,
DefaultMode: helpers.PointerInt32(420),
},
},
}, {
@@ -31,7 +33,8 @@ func GetVolumes(
Name: secretPasswordsName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretPasswordsName,
SecretName: secretPasswordsName,
DefaultMode: helpers.PointerInt32(384),
},
},
}, {
@@ -39,6 +42,7 @@ func GetVolumes(
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: consoleCMName},
DefaultMode: helpers.PointerInt32(420),
},
},
}, {
@@ -56,6 +60,7 @@ func userVolumes(
) []corev1.Volume {
volumes := make([]corev1.Volume, 0)
sort.Strings(users)
for _, user := range users {
userReplaced := strings.ReplaceAll(user, "_", "-")
internalName := "client-" + userReplaced

View File

@@ -17,7 +17,8 @@ func TestDeployment_Volumes(t *testing.T) {
Name: secretName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretName,
SecretName: secretName,
DefaultMode: helpers.PointerInt32(420),
},
},
}, {
@@ -32,7 +33,8 @@ func TestDeployment_Volumes(t *testing.T) {
Name: secretPasswordsName,
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: secretPasswordsName,
SecretName: secretPasswordsName,
DefaultMode: helpers.PointerInt32(384),
},
},
}, {
@@ -40,6 +42,7 @@ func TestDeployment_Volumes(t *testing.T) {
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: consoleCMName},
DefaultMode: helpers.PointerInt32(420),
},
},
}, {

View File

@@ -4,14 +4,13 @@ import (
"crypto/sha512"
"encoding/base64"
"encoding/json"
"github.com/pkg/errors"
"github.com/rakyll/statik/fs"
"os"
"path/filepath"
"regexp"
"sort"
"github.com/pkg/errors"
"github.com/rakyll/statik/fs"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/configmap"
@@ -102,10 +101,9 @@ func AdaptFunc(
},
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
SecurityContext: &corev1.PodSecurityContext{},
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
Containers: []corev1.Container{
getMigrationContainer(dbHost, dbPort, migrationUser, secretPasswordName, users),
},

View File

@@ -97,10 +97,9 @@ func TestMigration_AdaptFunc(t *testing.T) {
},
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
SecurityContext: &corev1.PodSecurityContext{},
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: getPreContainer(dbHost, dbPort, migrationUser, secretPasswordName),
Containers: []corev1.Container{
getMigrationContainer(dbHost, dbPort, migrationUser, secretPasswordName, users),
},

View File

@@ -2,48 +2,104 @@ package zitadel
import (
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/configuration"
)
func getSecretsMap(desiredKind *DesiredV0) map[string]*secret.Secret {
secrets := map[string]*secret.Secret{}
func getSecretsMap(desiredKind *DesiredV0) (
map[string]*secret.Secret,
map[string]*secret.Existing,
) {
if desiredKind.Spec != nil && desiredKind.Spec.Configuration != nil {
conf := desiredKind.Spec.Configuration
if conf.Tracing != nil {
if conf.Tracing.ServiceAccountJSON == nil {
conf.Tracing.ServiceAccountJSON = &secret.Secret{}
}
secrets["tracingserviceaccountjson"] = conf.Tracing.ServiceAccountJSON
}
var (
secrets = map[string]*secret.Secret{}
existing = map[string]*secret.Existing{}
)
if conf.Secrets != nil {
if conf.Secrets.Keys == nil {
conf.Secrets.Keys = &secret.Secret{}
}
secrets["keys"] = conf.Secrets.Keys
}
if conf.Notifications != nil {
if conf.Notifications.GoogleChatURL == nil {
conf.Notifications.GoogleChatURL = &secret.Secret{}
}
secrets["googlechaturl"] = conf.Notifications.GoogleChatURL
if conf.Notifications.Twilio.SID == nil {
conf.Notifications.Twilio.SID = &secret.Secret{}
}
secrets["twiliosid"] = conf.Notifications.Twilio.SID
if conf.Notifications.Twilio.AuthToken == nil {
conf.Notifications.Twilio.AuthToken = &secret.Secret{}
}
secrets["twilioauthtoken"] = conf.Notifications.Twilio.AuthToken
if conf.Notifications.Email.AppKey == nil {
conf.Notifications.Email.AppKey = &secret.Secret{}
}
secrets["emailappkey"] = conf.Notifications.Email.AppKey
}
if desiredKind.Spec == nil {
desiredKind.Spec = &Spec{}
}
return secrets
if desiredKind.Spec.Configuration == nil {
desiredKind.Spec.Configuration = &configuration.Configuration{}
}
conf := desiredKind.Spec.Configuration
if conf.Tracing == nil {
conf.Tracing = &configuration.Tracing{}
}
if conf.Tracing.ServiceAccountJSON == nil {
conf.Tracing.ServiceAccountJSON = &secret.Secret{}
}
if conf.Tracing.ExistingServiceAccountJSON == nil {
conf.Tracing.ExistingServiceAccountJSON = &secret.Existing{}
}
sakey := "tracingserviceaccountjson"
secrets[sakey] = conf.Tracing.ServiceAccountJSON
existing[sakey] = conf.Tracing.ExistingServiceAccountJSON
if conf.Secrets == nil {
conf.Secrets = &configuration.Secrets{}
}
if conf.Secrets.Keys == nil {
conf.Secrets.Keys = &secret.Secret{}
}
if conf.Secrets.ExistingKeys == nil {
conf.Secrets.ExistingKeys = &secret.Existing{}
}
keysKey := "keys"
secrets[keysKey] = conf.Secrets.Keys
existing[keysKey] = conf.Secrets.ExistingKeys
if conf.Notifications == nil {
conf.Notifications = &configuration.Notifications{}
}
if conf.Notifications.GoogleChatURL == nil {
conf.Notifications.GoogleChatURL = &secret.Secret{}
}
if conf.Notifications.ExistingGoogleChatURL == nil {
conf.Notifications.ExistingGoogleChatURL = &secret.Existing{}
}
gchatkey := "googlechaturl"
secrets[gchatkey] = conf.Notifications.GoogleChatURL
existing[gchatkey] = conf.Notifications.ExistingGoogleChatURL
if conf.Notifications.Twilio == nil {
conf.Notifications.Twilio = &configuration.Twilio{}
}
if conf.Notifications.Twilio.SID == nil {
conf.Notifications.Twilio.SID = &secret.Secret{}
}
if conf.Notifications.Twilio.ExistingSID == nil {
conf.Notifications.Twilio.ExistingSID = &secret.Existing{}
}
twilKey := "twiliosid"
secrets[twilKey] = conf.Notifications.Twilio.SID
existing[twilKey] = conf.Notifications.Twilio.ExistingSID
if conf.Notifications.Twilio.AuthToken == nil {
conf.Notifications.Twilio.AuthToken = &secret.Secret{}
}
if conf.Notifications.Twilio.ExistingAuthToken == nil {
conf.Notifications.Twilio.ExistingAuthToken = &secret.Existing{}
}
twilOAuthKey := "twilioauthtoken"
secrets[twilOAuthKey] = conf.Notifications.Twilio.AuthToken
existing[twilOAuthKey] = conf.Notifications.Twilio.ExistingAuthToken
if conf.Notifications.Email == nil {
conf.Notifications.Email = &configuration.Email{}
}
if conf.Notifications.Email.AppKey == nil {
conf.Notifications.Email.AppKey = &secret.Secret{}
}
if conf.Notifications.Email.ExistingAppKey == nil {
conf.Notifications.Email.ExistingAppKey = &secret.Existing{}
}
mailKey := "emailappkey"
secrets[mailKey] = conf.Notifications.Email.AppKey
existing[mailKey] = conf.Notifications.Email.ExistingAppKey
return secrets, existing
}

View File

@@ -20,13 +20,13 @@ func GetExpectedService(
zitadelPodSelector *labels.Selector,
grpcPortName string,
grpcServiceName *labels.Name,
grpcPort uint16,
grpcPort int,
httpPortName string,
httpServiceName *labels.Name,
httpPort uint16,
httpPort int,
uiPortName string,
uiServiceName *labels.Name,
uiPort uint16,
uiPort int,
) []*corev1.Service {
grpcPorts := []corev1.ServicePort{{
@@ -123,11 +123,13 @@ func TestServices_AdaptEnsure1(t *testing.T) {
namespace := "test"
grpcPortName := "grpc"
grpcServiceName := "grpc"
grpcPort := 1
httpPortName := "http"
httpServiceName := "http"
httpPort := 2
uiPortName := "ui"
uiServiceName := "ui"
var grpcPort, httpPort, uiPort uint16 = 1, 2, 3
uiPort := 3
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@@ -153,11 +155,11 @@ func TestServices_AdaptEnsure1(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort,
uint16(uiPort),
)
assert.NilError(t, err)
@@ -172,11 +174,13 @@ func TestServices_AdaptEnsure2(t *testing.T) {
namespace := "test0"
grpcPortName := "grpc"
grpcServiceName := "grpc1"
grpcPort := 11
httpPortName := "http"
httpServiceName := "http2"
httpPort := 22
uiPortName := "ui"
uiServiceName := "ui3"
var grpcPort, httpPort, uiPort uint16 = 11, 22, 33
uiPort := 33
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@@ -203,11 +207,11 @@ func TestServices_AdaptEnsure2(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
ensure, err := query(client, nil)
@@ -221,11 +225,13 @@ func TestServices_AdaptEnsure3(t *testing.T) {
namespace := "test00"
grpcPortName := "grpc"
grpcServiceName := "grpc11"
grpcPort := 111
httpPortName := "http"
httpServiceName := "http22"
httpPort := 222
uiPortName := "ui"
uiServiceName := "ui33"
var grpcPort, httpPort, uiPort uint16 = 111, 222, 333
uiPort := 333
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@@ -252,11 +258,11 @@ func TestServices_AdaptEnsure3(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
ensure, err := query(client, nil)
@@ -270,11 +276,13 @@ func TestServices_AdaptDestroy1(t *testing.T) {
namespace := "test"
grpcPortName := "grpc"
grpcServiceName := "grpc"
grpcPort := 1
httpPortName := "http"
httpServiceName := "http"
httpPort := 2
uiPortName := "ui"
uiServiceName := "ui"
var grpcPort, httpPort, uiPort uint16 = 1, 2, 3
uiPort := 3
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@@ -301,11 +309,11 @@ func TestServices_AdaptDestroy1(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
assert.NilError(t, destroy(client))
@@ -317,11 +325,13 @@ func TestServices_AdaptDestroy2(t *testing.T) {
namespace := "test0"
grpcPortName := "grpc"
grpcServiceName := "grpc1"
grpcPort := 11
httpPortName := "http"
httpServiceName := "http2"
httpPort := 22
uiPortName := "ui"
uiServiceName := "ui3"
var grpcPort, httpPort, uiPort uint16 = 11, 22, 33
uiPort := 33
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@@ -348,11 +358,11 @@ func TestServices_AdaptDestroy2(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
assert.NilError(t, destroy(client))
@@ -364,11 +374,13 @@ func TestServices_AdaptDestroy3(t *testing.T) {
namespace := "test00"
grpcPortName := "grpc"
grpcServiceName := "grpc11"
grpcPort := 111
httpPortName := "http"
httpServiceName := "http22"
httpPort := 222
uiPortName := "ui"
uiServiceName := "ui33"
var grpcPort, httpPort, uiPort uint16 = 111, 222, 333
uiPort := 333
componentLabels, podSelectorLabels, nameLabels := serviceLabels(grpcServiceName, httpServiceName, uiServiceName)
@@ -395,11 +407,11 @@ func TestServices_AdaptDestroy3(t *testing.T) {
podSelectorLabels,
namespace,
grpcServiceName,
grpcPort,
uint16(grpcPort),
httpServiceName,
httpPort,
uint16(httpPort),
uiServiceName,
uiPort)
uint16(uiPort))
assert.NilError(t, err)
assert.NilError(t, destroy(client))

View File

@@ -10,10 +10,10 @@ import (
func GetClientIDFunc(
namespace string,
httpServiceName string,
httpPort uint16,
httpPort int,
) func() string {
return func() string {
resp, err := http.Get("http://" + httpServiceName + "." + namespace + ":" + strconv.Itoa(int(httpPort)) + "/clientID")
resp, err := http.Get("http://" + httpServiceName + "." + namespace + ":" + strconv.Itoa(httpPort) + "/clientID")
if err != nil || resp.StatusCode >= 400 {
return ""
}

View File

@@ -1,8 +1,6 @@
package setup
import (
"time"
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/k8s"
@@ -21,7 +19,6 @@ const (
containerName = "zitadel"
rootSecret = "client-root"
dbSecrets = "db-secrets"
timeout = 300 * time.Second
)
func AdaptFunc(
@@ -40,12 +37,11 @@ func AdaptFunc(
consoleCMName string,
secretVarsName string,
secretPasswordsName string,
users []string,
migrationDone operator.EnsureFunc,
configurationDone operator.EnsureFunc,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string,
) (
operator.QueryFunc,
func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc,
operator.DestroyFunc,
error,
) {
@@ -63,49 +59,60 @@ func AdaptFunc(
operator.ResourceDestroyToZitadelDestroy(destroyJ),
}
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
jobDef := jobDef(
nameLabels,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
namespace,
componentLabels,
nodeselector,
tolerations,
)
hashes := getConfigurationHashes(k8sClient, queried)
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
jobDef.Annotations[k] = v
jobDef.Spec.Template.Annotations[k] = v
return func(
necessaryUsers map[string]string,
getConfigurationHashes func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error),
) operator.QueryFunc {
return func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) (operator.EnsureFunc, error) {
users := make([]string, 0)
for user := range necessaryUsers {
users = append(users, user)
}
}
query, err := job.AdaptFuncToEnsure(jobDef)
if err != nil {
return nil, err
}
jobDef := jobDef(
nameLabels,
users,
version,
resources,
cmName,
certPath,
secretName,
secretPath,
consoleCMName,
secretVarsName,
secretPasswordsName,
namespace,
componentLabels,
nodeselector,
tolerations,
)
queriers := []operator.QueryFunc{
operator.EnsureFuncToQueryFunc(migrationDone),
operator.EnsureFuncToQueryFunc(configurationDone),
operator.ResourceQueryToZitadelQuery(query),
}
hashes, err := getConfigurationHashes(k8sClient, queried, necessaryUsers)
if err != nil {
return nil, err
}
if hashes != nil && len(hashes) != 0 {
for k, v := range hashes {
jobDef.Annotations[k] = v
jobDef.Spec.Template.Annotations[k] = v
}
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
query, err := job.AdaptFuncToEnsure(jobDef)
if err != nil {
return nil, err
}
queriers := []operator.QueryFunc{
operator.ResourceQueryToZitadelQuery(query),
}
return operator.QueriersToEnsureFunc(internalMonitor, false, queriers, k8sClient, queried)
}
},
operator.DestroyersToDestroyFunc(internalMonitor, destroyers),
nil
}
func jobDef(name *labels.Name, users []string, version *string, resources *k8s.Resources, cmName string, certPath string, secretName string, secretPath string, consoleCMName string, secretVarsName string, secretPasswordsName string, namespace string, componentLabels *labels.Component, nodeselector map[string]string, tolerations []corev1.Toleration) *batchv1.Job {
@@ -152,11 +159,10 @@ func jobDef(name *labels.Name, users []string, version *string, resources *k8s.R
Annotations: map[string]string{},
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
SecurityContext: &corev1.PodSecurityContext{},
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
RestartPolicy: "Never",
DNSPolicy: "ClusterFirst",

View File

@@ -27,7 +27,11 @@ func TestSetup_AdaptFunc(t *testing.T) {
client := kubernetesmock.NewMockClientInt(gomock.NewController(t))
namespace := "test"
reason := "test"
users := []string{"test"}
usersMap := map[string]string{"test": "test"}
users := []string{}
for _, user := range usersMap {
users = append(users, user)
}
nodeselector := map[string]string{"test": "test"}
tolerations := []corev1.Toleration{}
dbHost := "test"
@@ -91,11 +95,10 @@ func TestSetup_AdaptFunc(t *testing.T) {
Annotations: annotations,
},
Spec: corev1.PodSpec{
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
SecurityContext: &corev1.PodSecurityContext{},
NodeSelector: nodeselector,
Tolerations: tolerations,
InitContainers: initContainers,
Containers: containers,
RestartPolicy: "Never",
DNSPolicy: "ClusterFirst",
@@ -110,17 +113,11 @@ func TestSetup_AdaptFunc(t *testing.T) {
client.EXPECT().ApplyJob(jobDef).Times(1)
client.EXPECT().GetJob(namespace, getJobName(reason)).Times(1).Return(nil, macherrs.NewNotFound(schema.GroupResource{"batch", "jobs"}, jobNamePrefix+reason))
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}) map[string]string {
return map[string]string{"testHash": "test"}
}
migrationDone := func(k8sClient kubernetes.ClientInt) error {
return nil
}
configurationDone := func(k8sClient kubernetes.ClientInt) error {
return nil
getConfigurationHashes := func(k8sClient kubernetes.ClientInt, queried map[string]interface{}, necessaryUsers map[string]string) (map[string]string, error) {
return map[string]string{"testHash": "test"}, nil
}
query, _, err := AdaptFunc(
getQuery, _, err := AdaptFunc(
monitor,
componentLabels,
namespace,
@@ -136,10 +133,6 @@ func TestSetup_AdaptFunc(t *testing.T) {
consoleCMName,
secretVarsName,
secretPasswordsName,
)
queried := map[string]interface{}{}
@@ -149,6 +142,7 @@ func TestSetup_AdaptFunc(t *testing.T) {
})
assert.NoError(t, err)
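// AdaptFunc now returns a closure: the concrete users and the hash function
// are injected only when the query is built.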
query := getQuery(usersMap, getConfigurationHashes)
ensure, err := query(client, queried)
assert.NoError(t, err)
assert.NoError(t, ensure(client))


@@ -5,6 +5,11 @@ import (
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/zitadel/operator"
"github.com/pkg/errors"
"time"
)
const (
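// assumption: bounds how long the done checks in this file wait for the setup job to complete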
timeout = 20 * time.Minute
)
func GetDoneFunc(


@@ -3,88 +3,109 @@ package zitadel
import (
"sort"
"github.com/caos/orbos/pkg/helper"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/configuration"
)
const (
migrationUser = "flyway"
mgmtUser = "management"
adminUser = "adminapi"
authUser = "auth"
authzUser = "authz"
notUser = "notification"
esUser = "eventstore"
queriesUser = "queries"
)
func getUserListWithoutPasswords(desired *DesiredV0) []string {
userpw, _ := getAllUsers(nil, desired)
users := make([]string, 0)
for user := range userpw {
users = append(users, user)
}
sort.Slice(users, func(i, j int) bool {
return users[i] < users[j]
})
return users
}
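// getAllUsers resolves a password for every database user; with a nil client
// (as above) the user name doubles as a placeholder password.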
func getAllUsers(k8sClient kubernetes.ClientInt, desired *DesiredV0) (map[string]string, error) {
passwords := &configuration.Passwords{}
if desired != nil && desired.Spec != nil && desired.Spec.Configuration != nil && desired.Spec.Configuration.Passwords != nil {
passwords = desired.Spec.Configuration.Passwords
}
users := make(map[string]string, 0)
if err := fillInUserPassword(k8sClient, migrationUser, passwords.Migration, passwords.ExistingMigration, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, mgmtUser, passwords.Management, passwords.ExistingManagement, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, adminUser, passwords.Adminapi, passwords.ExistingAdminapi, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, authUser, passwords.Auth, passwords.ExistingAuth, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, authzUser, passwords.Authz, passwords.ExistingAuthz, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, notUser, passwords.Notification, passwords.ExistingNotification, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, esUser, passwords.Eventstore, passwords.ExistingEventstore, users); err != nil {
return nil, err
}
if err := fillInUserPassword(k8sClient, queriesUser, passwords.Queries, passwords.ExistingQueries, users); err != nil {
return nil, err
}
return users, nil
}
func fillInUserPassword(
k8sClient kubernetes.ClientInt,
user string,
secret *secret.Secret,
existing *secret.Existing,
userpw map[string]string,
) error {
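// No cluster access: fall back to the user name so callers that only need
// the names (getUserListWithoutPasswords) still work.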
if k8sClient == nil {
userpw[user] = user
return nil
}
pw, err := helper.GetSecretValue(k8sClient, secret, existing)
if err != nil {
return err
}
if pw != "" {
userpw[user] = pw
} else {
userpw[user] = user
}
return nil
}
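// getZitadelUserList returns every user and password except the flyway migration user.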
func getZitadelUserList(k8sClient kubernetes.ClientInt, desired *DesiredV0) (map[string]string, error) {
allUsersMap, err := getAllUsers(k8sClient, desired)
if err != nil {
return nil, err
}
allZitadelUsers := make(map[string]string, 0)
for k, v := range allUsersMap {
if k != migrationUser {
allZitadelUsers[k] = v
}
}
return allZitadelUsers, nil
}
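// Illustrative only - how the helpers above compose, assuming k8sClient and
// desired come from the surrounding adapt flow:
//
//   userpw, _ := getAllUsers(k8sClient, desired)              // every user incl. flyway
//   zitadelUsers, _ := getZitadelUserList(k8sClient, desired) // every user except flyway
//   names := getUserListWithoutPasswords(desired)             // sorted names, no secret lookups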


@@ -3,18 +3,25 @@ package orb
import (
"github.com/caos/orbos/mntr"
"github.com/caos/orbos/pkg/kubernetes"
"github.com/caos/orbos/pkg/kubernetes/resources/namespace"
"github.com/caos/orbos/pkg/orb"
"github.com/caos/orbos/pkg/secret"
"github.com/caos/orbos/pkg/tree"
"github.com/caos/zitadel/operator"
"github.com/caos/zitadel/operator/zitadel/kinds/iam"
zitadeldb "github.com/caos/zitadel/operator/zitadel/kinds/iam/zitadel/database"
"github.com/pkg/errors"
)
const (
namespaceName = "caos-zitadel"
)
func AdaptFunc(
orbconfig *orb.Orb,
action string,
binaryVersion *string,
gitops bool,
features []string,
) operator.AdaptFunc {
return func(
@@ -25,6 +32,8 @@ func AdaptFunc(
queryFunc operator.QueryFunc,
destroyFunc operator.DestroyFunc,
allSecrets map[string]*secret.Secret,
allExisting map[string]*secret.Existing,
migrate bool,
err error,
) {
defer func() {
@@ -32,12 +41,13 @@ func AdaptFunc(
}()
allSecrets = make(map[string]*secret.Secret)
allExisting = make(map[string]*secret.Existing)
orbMonitor := monitor.WithField("kind", "orb")
desiredKind, err := ParseDesiredV0(desiredTree)
if err != nil {
return nil, nil, nil, nil, false, errors.Wrap(err, "parsing desired state failed")
}
desiredTree.Parsed = desiredKind
currentTree = &tree.Tree{}
@@ -46,35 +56,64 @@ func AdaptFunc(
orbMonitor = orbMonitor.Verbose()
}
var dbClient zitadeldb.Client
if gitops {
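// GitOps mode reads the database connection data from the orb repository.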
dbClientT, err := zitadeldb.NewGitOpsClient(monitor, orbconfig.URL, orbconfig.Repokey)
if err != nil {
monitor.Error(err)
return nil, nil, nil, nil, false, err
}
dbClient = dbClientT
} else {
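// CRD mode derives the database connection from in-cluster resources instead.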
dbClient = zitadeldb.NewCrdClient(monitor)
}
operatorLabels := mustZITADELOperator(binaryVersion)
queryNS, err := namespace.AdaptFuncToEnsure(namespaceName)
if err != nil {
return nil, nil, nil, nil, false, err
}
/*destroyNS, err := namespace.AdaptFuncToDestroy(namespaceName)
if err != nil {
return nil, nil, allSecrets, err
}*/
iamCurrent := &tree.Tree{}
queryIAM, destroyIAM, zitadelSecrets, zitadelExisting, migrateIAM, err := iam.GetQueryAndDestroyFuncs(
orbMonitor,
operatorLabels,
desiredKind.IAM,
iamCurrent,
desiredKind.Spec.NodeSelector,
desiredKind.Spec.Tolerations,
orbconfig,
dbClient,
namespaceName,
action,
&desiredKind.Spec.Version,
features,
)
if err != nil {
return nil, nil, nil, nil, false, err
}
secret.AppendSecrets("", allSecrets, zitadelSecrets)
migrate = migrate || migrateIAM
secret.AppendSecrets("", allSecrets, zitadelSecrets, allExisting, zitadelExisting)
destroyers := make([]operator.DestroyFunc, 0)
queriers := make([]operator.QueryFunc, 0)
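// Each requested feature wires up its own queriers; every branch first
// ensures the caos-zitadel namespace.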
for _, feature := range features {
switch feature {
case "iam", "migration", "scaleup", "scaledown":
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
queryIAM,
)
destroyers = append(destroyers, destroyIAM)
case "operator":
queriers = append(queriers,
operator.ResourceQueryToZitadelQuery(queryNS),
operator.EnsureFuncToQueryFunc(Reconcile(monitor, desiredKind.Spec)),
)
}
}
@@ -96,6 +135,8 @@ func AdaptFunc(
return operator.DestroyersToDestroyFunc(monitor, destroyers)(k8sClient)
},
allSecrets,
allExisting,
migrate,
nil
}
}


@@ -8,20 +8,24 @@ import (
type DesiredV0 struct {
Common *tree.Common `yaml:",inline"`
Spec struct {
Verbose bool
NodeSelector map[string]string `yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `yaml:"tolerations,omitempty"`
Version string `yaml:"version,omitempty"`
SelfReconciling bool `yaml:"selfReconciling"`
//Use this registry to pull the database operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
IAM *tree.Tree
Spec *Spec `json:"spec" yaml:"spec"`
IAM *tree.Tree `json:"iam" yaml:"iam"`
}
// +kubebuilder:object:generate=true
type Spec struct {
Verbose bool `json:"verbose" yaml:"verbose"`
NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"`
Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"`
Version string `json:"version,omitempty" yaml:"version,omitempty"`
SelfReconciling bool `json:"selfReconciling" yaml:"selfReconciling"`
GitOps bool `json:"gitops,omitempty" yaml:"gitops,omitempty"`
//Use this registry to pull the zitadel operator image from
//@default: ghcr.io
CustomImageRegistry string `json:"customImageRegistry,omitempty" yaml:"customImageRegistry,omitempty"`
}
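// Illustrative only - a desired state matching the new Spec might look like
// this (values are examples, not defaults):
//
//   kind: zitadel.caos.ch/Orb
//   version: v0
//   spec:
//     version: v1.0.0
//     selfReconciling: true
//     gitops: false
//   iam: ...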
func ParseDesiredV0(desiredTree *tree.Tree) (*DesiredV0, error) {
desiredKind := &DesiredV0{Common: desiredTree.Common}
if err := desiredTree.Original.Decode(desiredKind); err != nil {


@@ -10,37 +10,36 @@ import (
"github.com/pkg/errors"
)
func Reconcile(
monitor mntr.Monitor,
spec *Spec,
) operator.EnsureFunc {
return func(k8sClient kubernetes2.ClientInt) (err error) {
recMonitor := monitor.WithField("version", spec.Version)
if spec.Version == "" {
err := errors.New("No version provided for self-reconciling")
recMonitor.Error(err)
return err
}
imageRegistry := spec.CustomImageRegistry
if imageRegistry == "" {
imageRegistry = "ghcr.io"
}
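// When self-reconciling, a minimal Orb tree is constructed below solely to
// derive the API labels for the operator artifacts.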
if spec.SelfReconciling {
desiredTree := &tree.Tree{
Common: &tree.Common{
Kind: "zitadel.caos.ch/Orb",
Version: "v0",
},
}
if err := kubernetes.EnsureZitadelOperatorArtifacts(monitor, treelabels.MustForAPI(desiredTree, mustZITADELOperator(&spec.Version)), k8sClient, spec.Version, spec.NodeSelector, spec.Tolerations, imageRegistry, spec.GitOps); err != nil {
recMonitor.Error(errors.Wrap(err, "Failed to deploy zitadel-operator into k8s-cluster"))
return err
}
recMonitor.Info("Applied zitadel-operator")
}
return nil


@@ -0,0 +1,54 @@
// +build !ignore_autogenerated
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package orb
import (
v1 "k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Spec) DeepCopyInto(out *Spec) {
*out = *in
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Tolerations != nil {
in, out := &in.Tolerations, &out.Tolerations
*out = make([]v1.Toleration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
func (in *Spec) DeepCopy() *Spec {
if in == nil {
return nil
}
out := new(Spec)
in.DeepCopyInto(out)
return out
}


@@ -2,6 +2,7 @@ package zitadel
import (
"errors"
"github.com/caos/zitadel/operator"
"github.com/caos/orbos/mntr"
@@ -10,7 +11,12 @@ import (
"github.com/caos/orbos/pkg/tree"
)
func Takeoff(
monitor mntr.Monitor,
gitClient *git.Client,
adapt operator.AdaptFunc,
k8sClient *kubernetes.Client,
) func() error {
return func() error {
internalMonitor := monitor.WithField("operator", "zitadel")
internalMonitor.Info("Takeoff")
@@ -26,7 +32,7 @@ func Takeoff(monitor mntr.Monitor, gitClient *git.Client, adapt operator.AdaptFu
return err
}
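// adapt now additionally returns the existing-secret references and a
// migrate flag; both are unused in takeoff.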
query, _, _, _, _, err := adapt(internalMonitor, treeDesired, treeCurrent)
if err != nil {
internalMonitor.Error(err)
return err