From 9f3a5511c137466874e423f97a1a150cf755ab5f Mon Sep 17 00:00:00 2001
From: Simon Beck
Date: Tue, 10 Dec 2024 11:17:26 +0100
Subject: [PATCH] Make sure the backup cleanup is always triggered

This commit makes sure that backups go into a completed state, even if
there was nothing to back up at all.
---
 e2e/test-12-annotated-failure.bats      |  2 +-
 e2e/test-13-cleanup-empty-jobs.bats     | 26 +++++++++++++++++++++++++
 operator/backupcontroller/controller.go |  2 ++
 operator/backupcontroller/executor.go   |  7 +++++++
 operator/reconciler/reconciler.go       |  2 +-
 5 files changed, 37 insertions(+), 2 deletions(-)
 create mode 100644 e2e/test-13-cleanup-empty-jobs.bats

diff --git a/e2e/test-12-annotated-failure.bats b/e2e/test-12-annotated-failure.bats
index f4b26683b..0b72e8375 100644
--- a/e2e/test-12-annotated-failure.bats
+++ b/e2e/test-12-annotated-failure.bats
@@ -11,7 +11,7 @@ DETIK_CLIENT_NAMESPACE="k8up-e2e-subject"
 # shellcheck disable=SC2034
 DEBUG_DETIK="true"
 
-@test "Annotated app, When creating a backup, Then expect Error" {
+@test "Given annotated app, When creating a backup, Then expect Error" {
 	expected_content="expected content: $(timestamp)"
 	expected_filename="expected_filename.txt"
 
diff --git a/e2e/test-13-cleanup-empty-jobs.bats b/e2e/test-13-cleanup-empty-jobs.bats
new file mode 100644
index 000000000..8603d0d52
--- /dev/null
+++ b/e2e/test-13-cleanup-empty-jobs.bats
@@ -0,0 +1,26 @@
+#!/usr/bin/env bats
+
+load "lib/utils"
+load "lib/detik"
+load "lib/k8up"
+
+# shellcheck disable=SC2034
+DETIK_CLIENT_NAME="kubectl"
+# shellcheck disable=SC2034
+DETIK_CLIENT_NAMESPACE="k8up-e2e-subject"
+# shellcheck disable=SC2034
+DEBUG_DETIK="true"
+
+@test "Given empty namespace, When creating multiple backups, Then expect cleanup" {
+	given_a_running_operator
+	given_a_clean_ns
+	given_s3_storage
+
+	kubectl apply -f definitions/secrets
+	yq e '.spec.podSecurityContext.runAsUser='$(id -u)' | .metadata.name="first-backup"' definitions/backup/backup.yaml | kubectl apply -f -
+
+	yq e '.spec.podSecurityContext.runAsUser='$(id -u)' | .metadata.name="second-backup"' definitions/backup/backup.yaml | kubectl apply -f -
+
+	wait_for_until_jsonpath backup/second-backup 2m 'jsonpath={.status.conditions[?(@.type=="Scrubbed")].message}="Deleted 1 resources"'
+
+}
diff --git a/operator/backupcontroller/controller.go b/operator/backupcontroller/controller.go
index a192aa0e7..bf4a39c3e 100644
--- a/operator/backupcontroller/controller.go
+++ b/operator/backupcontroller/controller.go
@@ -47,6 +47,7 @@ func (r *BackupReconciler) Provision(ctx context.Context, obj *k8upv1.Backup) (r
 		log.V(1).Info("backup just started, waiting")
 		return controllerruntime.Result{RequeueAfter: 5 * time.Second}, nil
 	}
+
 	if obj.Status.HasFinished() || isPrebackupFailed(obj) {
 		cleanupCond := meta.FindStatusCondition(obj.Status.Conditions, k8upv1.ConditionScrubbed.String())
 		if cleanupCond == nil || cleanupCond.Reason != k8upv1.ReasonSucceeded.String() {
@@ -110,6 +111,7 @@ func (r *BackupReconciler) ReconcileJobStatus(ctx context.Context, obj *k8upv1.B
 	} else if numStarted > 0 {
 		objStatus.SetStarted(message)
 	}
+
 	obj.SetStatus(objStatus)
 
 	log.V(1).Info("updating status")
diff --git a/operator/backupcontroller/executor.go b/operator/backupcontroller/executor.go
index dfbfeb99d..5c5935788 100644
--- a/operator/backupcontroller/executor.go
+++ b/operator/backupcontroller/executor.go
@@ -236,6 +236,13 @@ func (b *BackupExecutor) startBackup(ctx context.Context) error {
 		}
 	}
 
+	if len(backupJobs) == 0 {
+		status := b.Obj.GetStatus()
+		status.SetSucceeded("nothing to backup")
+		b.Obj.SetStatus(status)
+		return b.Client.Status().Update(ctx, b.Obj)
+	}
+
 	index := 0
 	for name, batchJob := range backupJobs {
 		_, err = controllerruntime.CreateOrUpdate(ctx, b.Generic.Config.Client, batchJob.job, func() error {
diff --git a/operator/reconciler/reconciler.go b/operator/reconciler/reconciler.go
index 31f99e721..8f98932d4 100644
--- a/operator/reconciler/reconciler.go
+++ b/operator/reconciler/reconciler.go
@@ -56,7 +56,7 @@ func (ctrl *controller[T, L]) Reconcile(ctx context.Context, request controllerr
 	} else {
 		res, provisionErr = ctrl.reconciler.Provision(ctx, obj)
 	}
-	if apierrors.IsConflict(err) { // ignore "the object has been modified; please apply your changes to the latest version and try again" error, but requeue
+	if apierrors.IsConflict(provisionErr) { // ignore "the object has been modified; please apply your changes to the latest version and try again" error, but requeue
 		log := controllerruntime.LoggerFrom(ctx)
 		log.Info("Object has been modified, retrying...", "error", provisionErr.Error())
 		res.Requeue = true
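--
Note (not part of the commit): a minimal manual check of the new behaviour, assuming a cluster and namespace prepared like the e2e test above; the namespace and Backup names are taken from that test, not from any fixed API.

    # After creating first-backup and second-backup in a namespace with nothing
    # to back up, the second Backup should still complete and report that the
    # first one was cleaned up via the Scrubbed condition (see test-13 above).
    kubectl -n k8up-e2e-subject get backup second-backup \
      -o jsonpath='{.status.conditions[?(@.type=="Scrubbed")].message}'
    # expected output: Deleted 1 resources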