cmd/k8s-operator: fix a bunch of status equality checks (#14270)
Updates tailscale/tailscale#14269

Signed-off-by: Irbe Krumina <irbe@tailscale.com>
This commit is contained in: commit aa43388363 (parent cbf1a4efe9)
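Every status check touched below compared the result of DeepCopy(), which is a pointer, against the live status value, so apiequality.Semantic.DeepEqual always reported a difference and the reconcilers wrote a status update on every pass. A minimal sketch of the mismatch, using reflect.DeepEqual (which, like Semantic.DeepEqual, treats operands of different types as unequal) and a hypothetical Status type:

```go
// Minimal sketch (not from the repo) of the bug this commit fixes.
package main

import (
	"fmt"
	"reflect"
)

type Status struct{ Ready bool }

func main() {
	live := Status{Ready: true}
	old := live     // snapshot with identical contents
	oldCopy := &old // stands in for obj.Status.DeepCopy(), which returns a pointer

	// Old check: *Status vs Status -> different types, never "equal".
	fmt.Println(reflect.DeepEqual(oldCopy, live)) // false, even though nothing changed

	// Fixed check: *Status vs *Status -> compares the pointed-to values.
	fmt.Println(reflect.DeepEqual(oldCopy, &live)) // true
}
```

Taking the address of the live status, as every hunk below now does, makes the check a like-for-like *Status comparison, so an update is only issued when the status actually changed.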
@@ -113,7 +113,7 @@ func (a *ConnectorReconciler) Reconcile(ctx context.Context, req reconcile.Reque
 	setStatus := func(cn *tsapi.Connector, _ tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
 		tsoperator.SetConnectorCondition(cn, tsapi.ConnectorReady, status, reason, message, cn.Generation, a.clock, logger)
 		var updateErr error
-		if !apiequality.Semantic.DeepEqual(oldCnStatus, cn.Status) {
+		if !apiequality.Semantic.DeepEqual(oldCnStatus, &cn.Status) {
 			// An error encountered here should get returned by the Reconcile function.
 			updateErr = a.Client.Status().Update(ctx, cn)
 		}
@@ -64,7 +64,7 @@ func (esrr *egressSvcsReadinessReconciler) Reconcile(ctx context.Context, req re
 	oldStatus := svc.Status.DeepCopy()
 	defer func() {
 		tsoperator.SetServiceCondition(svc, tsapi.EgressSvcReady, st, reason, msg, esrr.clock, l)
-		if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) {
+		if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
 			err = errors.Join(err, esrr.Status().Update(ctx, svc))
 		}
 	}()
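For context, the egress Service hunk above (and the ServiceReconciler and Recorder hunks further down) all follow the same snapshot-then-defer shape. A hedged sketch of that pattern with the corrected comparison, using a plain corev1.Service and a generic controller-runtime client; the function name reconcileService is illustrative, not the operator's actual code:

```go
package sketch

import (
	"context"
	"errors"

	corev1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// reconcileService copies the status up front, lets the reconcile body mutate
// it, and only writes a status update if the live status now differs from the
// snapshot.
func reconcileService(ctx context.Context, c client.Client, svc *corev1.Service) (err error) {
	oldStatus := svc.Status.DeepCopy() // *corev1.ServiceStatus
	defer func() {
		// Pointer vs pointer: the corrected form this commit switches every check to.
		if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
			err = errors.Join(err, c.Status().Update(ctx, svc))
		}
	}()

	// ... reconcile logic that may mutate svc.Status ...
	return nil
}
```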
@@ -123,7 +123,7 @@ func (esr *egressSvcsReconciler) Reconcile(ctx context.Context, req reconcile.Re

 	oldStatus := svc.Status.DeepCopy()
 	defer func() {
-		if !apiequality.Semantic.DeepEqual(oldStatus, svc.Status) {
+		if !apiequality.Semantic.DeepEqual(oldStatus, &svc.Status) {
 			err = errors.Join(err, esr.Status().Update(ctx, svc))
 		}
 	}()
@@ -86,7 +86,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 			return reconcile.Result{}, nil
 		}
 		logger.Info("Cleaning up DNSConfig resources")
-		if err := a.maybeCleanup(ctx, &dnsCfg, logger); err != nil {
+		if err := a.maybeCleanup(&dnsCfg); err != nil {
 			logger.Errorf("error cleaning up reconciler resource: %v", err)
 			return res, err
 		}
@@ -100,9 +100,9 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	}

 	oldCnStatus := dnsCfg.Status.DeepCopy()
-	setStatus := func(dnsCfg *tsapi.DNSConfig, conditionType tsapi.ConditionType, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
+	setStatus := func(dnsCfg *tsapi.DNSConfig, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
 		tsoperator.SetDNSConfigCondition(dnsCfg, tsapi.NameserverReady, status, reason, message, dnsCfg.Generation, a.clock, logger)
-		if !apiequality.Semantic.DeepEqual(oldCnStatus, dnsCfg.Status) {
+		if !apiequality.Semantic.DeepEqual(oldCnStatus, &dnsCfg.Status) {
 			// An error encountered here should get returned by the Reconcile function.
 			if updateErr := a.Client.Status().Update(ctx, dnsCfg); updateErr != nil {
 				err = errors.Wrap(err, updateErr.Error())
@@ -118,7 +118,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 		msg := "invalid cluster configuration: more than one tailscale.com/dnsconfigs found. Please ensure that no more than one is created."
 		logger.Error(msg)
 		a.recorder.Event(&dnsCfg, corev1.EventTypeWarning, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
-		setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
+		setStatus(&dnsCfg, metav1.ConditionFalse, reasonMultipleDNSConfigsPresent, messageMultipleDNSConfigsPresent)
 	}

 	if !slices.Contains(dnsCfg.Finalizers, FinalizerName) {
@@ -127,7 +127,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 		if err := a.Update(ctx, &dnsCfg); err != nil {
 			msg := fmt.Sprintf(messageNameserverCreationFailed, err)
 			logger.Error(msg)
-			return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionFalse, reasonNameserverCreationFailed, msg)
+			return setStatus(&dnsCfg, metav1.ConditionFalse, reasonNameserverCreationFailed, msg)
 		}
 	}
 	if err := a.maybeProvision(ctx, &dnsCfg, logger); err != nil {
@@ -149,7 +149,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 		dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{
 			IP: ip,
 		}
-		return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated)
+		return setStatus(&dnsCfg, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated)
 	}
 	logger.Info("nameserver Service does not have an IP address allocated, waiting...")
 	return reconcile.Result{}, nil
@@ -188,7 +188,7 @@ func (a *NameserverReconciler) maybeProvision(ctx context.Context, tsDNSCfg *tsa
 // maybeCleanup removes DNSConfig from being tracked. The cluster resources
 // created, will be automatically garbage collected as they are owned by the
 // DNSConfig.
-func (a *NameserverReconciler) maybeCleanup(ctx context.Context, dnsCfg *tsapi.DNSConfig, logger *zap.SugaredLogger) error {
+func (a *NameserverReconciler) maybeCleanup(dnsCfg *tsapi.DNSConfig) error {
 	a.mu.Lock()
 	a.managedNameservers.Remove(dnsCfg.UID)
 	a.mu.Unlock()
@@ -103,7 +103,7 @@ func (pcr *ProxyClassReconciler) Reconcile(ctx context.Context, req reconcile.Re
 	} else {
 		tsoperator.SetProxyClassCondition(pc, tsapi.ProxyClassReady, metav1.ConditionTrue, reasonProxyClassValid, reasonProxyClassValid, pc.Generation, pcr.clock, logger)
 	}
-	if !apiequality.Semantic.DeepEqual(oldPCStatus, pc.Status) {
+	if !apiequality.Semantic.DeepEqual(oldPCStatus, &pc.Status) {
 		if err := pcr.Client.Status().Update(ctx, pc); err != nil {
 			logger.Errorf("error updating ProxyClass status: %v", err)
 			return reconcile.Result{}, err
@@ -110,7 +110,7 @@ func (r *ProxyGroupReconciler) Reconcile(ctx context.Context, req reconcile.Requ
 	oldPGStatus := pg.Status.DeepCopy()
 	setStatusReady := func(pg *tsapi.ProxyGroup, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
 		tsoperator.SetProxyGroupCondition(pg, tsapi.ProxyGroupReady, status, reason, message, pg.Generation, r.clock, logger)
-		if !apiequality.Semantic.DeepEqual(oldPGStatus, pg.Status) {
+		if !apiequality.Semantic.DeepEqual(oldPGStatus, &pg.Status) {
 			// An error encountered here should get returned by the Reconcile function.
 			if updateErr := r.Client.Status().Update(ctx, pg); updateErr != nil {
 				err = errors.Wrap(err, updateErr.Error())
@@ -131,7 +131,7 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
 func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (err error) {
 	oldSvcStatus := svc.Status.DeepCopy()
 	defer func() {
-		if !apiequality.Semantic.DeepEqual(oldSvcStatus, svc.Status) {
+		if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) {
 			// An error encountered here should get returned by the Reconcile function.
 			err = errors.Join(err, a.Client.Status().Update(ctx, svc))
 		}
@@ -196,7 +196,7 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
 func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (err error) {
 	oldSvcStatus := svc.Status.DeepCopy()
 	defer func() {
-		if !apiequality.Semantic.DeepEqual(oldSvcStatus, svc.Status) {
+		if !apiequality.Semantic.DeepEqual(oldSvcStatus, &svc.Status) {
 			// An error encountered here should get returned by the Reconcile function.
 			err = errors.Join(err, a.Client.Status().Update(ctx, svc))
 		}
@@ -102,7 +102,7 @@ func (r *RecorderReconciler) Reconcile(ctx context.Context, req reconcile.Reques
 	oldTSRStatus := tsr.Status.DeepCopy()
 	setStatusReady := func(tsr *tsapi.Recorder, status metav1.ConditionStatus, reason, message string) (reconcile.Result, error) {
 		tsoperator.SetRecorderCondition(tsr, tsapi.RecorderReady, status, reason, message, tsr.Generation, r.clock, logger)
-		if !apiequality.Semantic.DeepEqual(oldTSRStatus, tsr.Status) {
+		if !apiequality.Semantic.DeepEqual(oldTSRStatus, &tsr.Status) {
 			// An error encountered here should get returned by the Reconcile function.
 			if updateErr := r.Client.Status().Update(ctx, tsr); updateErr != nil {
 				err = errors.Wrap(err, updateErr.Error())