diff --git a/pkg/cmd/cli/repomantenance/maintenance.go b/pkg/cmd/cli/repomantenance/maintenance.go index 7ca6d7505e..56a88de7ec 100644 --- a/pkg/cmd/cli/repomantenance/maintenance.go +++ b/pkg/cmd/cli/repomantenance/maintenance.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/bombsimon/logrusr/v3" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -16,7 +17,6 @@ import ( "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -68,9 +68,7 @@ func (o *Options) Run(f velerocli.Factory) { logger := logging.DefaultLogger(o.LogLevelFlag.Parse(), o.FormatFlag.Parse()) logger.SetOutput(os.Stdout) - ctrl.SetLogger(zap.New(zap.UseDevMode(true))) - - time.Sleep(time.Minute) + ctrl.SetLogger(logrusr.New(logger)) pruneError := o.runRepoPrune(f, f.Namespace(), logger) defer func() { diff --git a/pkg/controller/backup_repository_controller.go b/pkg/controller/backup_repository_controller.go index 2f6fa42220..5682525b53 100644 --- a/pkg/controller/backup_repository_controller.go +++ b/pkg/controller/backup_repository_controller.go @@ -42,8 +42,8 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/constant" "github.com/vmware-tanzu/velero/pkg/label" - "github.com/vmware-tanzu/velero/pkg/repository" repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" + "github.com/vmware-tanzu/velero/pkg/repository/maintenance" repomanager "github.com/vmware-tanzu/velero/pkg/repository/manager" "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/logging" @@ -229,7 +229,7 @@ func (r *BackupRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, errors.Wrap(err, "error check and run repo maintenance jobs") } - if err := repository.DeleteOldMaintenanceJobs(r.Client, req.Name, r.keepLatestMaintenanceJobs); err != nil { + if err := maintenance.DeleteOldJobs(r.Client, req.Name, r.keepLatestMaintenanceJobs); err != nil { log.WithError(err).Warn("Failed to delete old maintenance jobs") } } @@ -325,7 +325,7 @@ func ensureRepo(repo *velerov1api.BackupRepository, repoManager repomanager.Mana } func (r *BackupRepoReconciler) recallMaintenance(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { - history, err := repository.WaitAllMaintenanceJobComplete(ctx, r.Client, req, defaultMaintenanceStatusQueueLength, log) + history, err := maintenance.WaitAllJobsComplete(ctx, r.Client, req, defaultMaintenanceStatusQueueLength, log) if err != nil { return errors.Wrapf(err, "error waiting incomplete repo maintenance job for repo %s", req.Name) } @@ -424,8 +424,8 @@ func isIdenticalHistory(a, b []velerov1api.BackupRepositoryMaintenanceStatus) bo return true } -var funcStartMaintenanceJob = repository.StartMaintenanceJob -var funcWaitMaintenanceJobComplete = repository.WaitMaintenanceJobComplete +var funcStartMaintenanceJob = maintenance.StartNewJob +var funcWaitMaintenanceJobComplete = maintenance.WaitJobComplete func (r *BackupRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { startTime := r.clock.Now() diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go index 
7ad65de0d8..cb763fb8f7 100644 --- a/pkg/controller/backup_repository_controller_test.go +++ b/pkg/controller/backup_repository_controller_test.go @@ -23,15 +23,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/repository/maintenance" repomokes "github.com/vmware-tanzu/velero/pkg/repository/mocks" repotypes "github.com/vmware-tanzu/velero/pkg/repository/types" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -39,7 +39,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/logging" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake" batchv1 "k8s.io/api/batch/v1" @@ -131,59 +130,268 @@ func waitMaintenanceJobCompleteFail(client.Client, context.Context, string, stri return velerov1api.BackupRepositoryMaintenanceStatus{}, errors.New("fake-wait-error") } -func waitMaintenanceJobCompleteSucceed(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) { - return velerov1api.BackupRepositoryMaintenanceStatus{ - StartTimestamp: &metav1.Time{Time: time.Now()}, - CompleteTimestamp: &metav1.Time{Time: time.Now().Add(time.Hour)}, - }, nil +func waitMaintenanceJobCompleteFunc(now time.Time, result velerov1api.BackupRepositoryMaintenanceResult, message string) func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) { + return func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) { + return velerov1api.BackupRepositoryMaintenanceStatus{ + StartTimestamp: &metav1.Time{Time: now}, + CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)}, + Result: result, + Message: message, + }, nil + } +} + +type fakeClock struct { + now time.Time +} + +func (f *fakeClock) After(time.Duration) <-chan time.Time { + return nil +} + +func (f *fakeClock) NewTicker(time.Duration) clock.Ticker { + return nil +} +func (f *fakeClock) NewTimer(time.Duration) clock.Timer { + return nil +} + +func (f *fakeClock) Now() time.Time { + return f.now +} + +func (f *fakeClock) Since(time.Time) time.Duration { + return 0 +} + +func (f *fakeClock) Sleep(time.Duration) {} + +func (f *fakeClock) Tick(time.Duration) <-chan time.Time { + return nil +} + +func (f *fakeClock) AfterFunc(time.Duration, func()) clock.Timer { + return nil } func TestRunMaintenanceIfDue(t *testing.T) { - rr := mockBackupRepositoryCR() - reconciler := mockBackupRepoReconciler(t, "", rr, nil) - funcStartMaintenanceJob = startMaintenanceJobFail - err := reconciler.Client.Create(context.TODO(), rr) - assert.NoError(t, err) - lastTm := rr.Status.LastMaintenanceTime - history := rr.Status.RecentMaintenance - err = reconciler.runMaintenanceIfDue(context.TODO(), rr, reconciler.logger) - assert.NoError(t, err) - assert.Equal(t, rr.Status.LastMaintenanceTime, lastTm) - assert.NotEqual(t, rr.Status.RecentMaintenance, history) - - rr = mockBackupRepositoryCR() - reconciler = mockBackupRepoReconciler(t, "", rr, 
nil) - funcStartMaintenanceJob = startMaintenanceJobSucceed - funcWaitMaintenanceJobComplete = waitMaintenanceJobCompleteFail - err = reconciler.Client.Create(context.TODO(), rr) - assert.NoError(t, err) - lastTm = rr.Status.LastMaintenanceTime - history = rr.Status.RecentMaintenance - err = reconciler.runMaintenanceIfDue(context.TODO(), rr, reconciler.logger) - assert.EqualError(t, err, "error waiting repo maintenance completion status: fake-wait-error") - assert.Equal(t, rr.Status.LastMaintenanceTime, lastTm) - assert.Equal(t, rr.Status.RecentMaintenance, history) - - rr = mockBackupRepositoryCR() - reconciler = mockBackupRepoReconciler(t, "", rr, nil) - funcStartMaintenanceJob = startMaintenanceJobSucceed - funcWaitMaintenanceJobComplete = waitMaintenanceJobCompleteSucceed - err = reconciler.Client.Create(context.TODO(), rr) - assert.NoError(t, err) - lastTm = rr.Status.LastMaintenanceTime - history = rr.Status.RecentMaintenance - err = reconciler.runMaintenanceIfDue(context.TODO(), rr, reconciler.logger) - assert.NoError(t, err) - assert.NotEqual(t, rr.Status.LastMaintenanceTime, lastTm) - assert.NotEqual(t, rr.Status.RecentMaintenance, history) + now := time.Now().Round(time.Second) - rr.Status.LastMaintenanceTime = &metav1.Time{Time: time.Now()} - lastTm = rr.Status.LastMaintenanceTime - history = rr.Status.RecentMaintenance - err = reconciler.runMaintenanceIfDue(context.TODO(), rr, reconciler.logger) - assert.NoError(t, err) - assert.Equal(t, rr.Status.LastMaintenanceTime, lastTm) - assert.Equal(t, rr.Status.RecentMaintenance, history) + tests := []struct { + name string + repo *velerov1api.BackupRepository + startJobFunc func(client.Client, context.Context, *velerov1api.BackupRepository, string, kube.PodResources, logrus.Level, *logging.FormatFlag, logrus.FieldLogger) (string, error) + waitJobFunc func(client.Client, context.Context, string, string, logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) + expectedMaintenanceTime time.Time + expectedHistory []velerov1api.BackupRepositoryMaintenanceStatus + expectedErr string + }{ + { + name: "not due", + repo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "repo", + }, + Spec: velerov1api.BackupRepositorySpec{ + MaintenanceFrequency: metav1.Duration{Duration: time.Hour}, + }, + Status: velerov1api.BackupRepositoryStatus{ + LastMaintenanceTime: &metav1.Time{Time: now}, + RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + CompleteTimestamp: &metav1.Time{Time: now}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + }, + expectedMaintenanceTime: now, + expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + CompleteTimestamp: &metav1.Time{Time: now}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + { + name: "start failed", + repo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "repo", + }, + Spec: velerov1api.BackupRepositorySpec{ + MaintenanceFrequency: metav1.Duration{Duration: time.Hour}, + }, + Status: velerov1api.BackupRepositoryStatus{ + LastMaintenanceTime: &metav1.Time{Time: now.Add(-time.Hour - time.Minute)}, + RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: 
&metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + }, + startJobFunc: startMaintenanceJobFail, + expectedMaintenanceTime: now.Add(-time.Hour - time.Minute), + expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + { + StartTimestamp: &metav1.Time{Time: now}, + Result: velerov1api.BackupRepositoryMaintenanceFailed, + Message: "Failed to start maintenance job, err: fake-start-error", + }, + }, + }, + { + name: "wait failed", + repo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "repo", + }, + Spec: velerov1api.BackupRepositorySpec{ + MaintenanceFrequency: metav1.Duration{Duration: time.Hour}, + }, + Status: velerov1api.BackupRepositoryStatus{ + LastMaintenanceTime: &metav1.Time{Time: now.Add(-time.Hour - time.Minute)}, + RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + }, + startJobFunc: startMaintenanceJobSucceed, + waitJobFunc: waitMaintenanceJobCompleteFail, + expectedErr: "error waiting repo maintenance completion status: fake-wait-error", + expectedMaintenanceTime: now.Add(-time.Hour - time.Minute), + expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + { + name: "maintenance failed", + repo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "repo", + }, + Spec: velerov1api.BackupRepositorySpec{ + MaintenanceFrequency: metav1.Duration{Duration: time.Hour}, + }, + Status: velerov1api.BackupRepositoryStatus{ + LastMaintenanceTime: &metav1.Time{Time: now.Add(-time.Hour - time.Minute)}, + RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + }, + startJobFunc: startMaintenanceJobSucceed, + waitJobFunc: waitMaintenanceJobCompleteFunc(now, velerov1api.BackupRepositoryMaintenanceFailed, "fake-maintenance-message"), + expectedMaintenanceTime: now.Add(-time.Hour - time.Minute), + expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + { + StartTimestamp: &metav1.Time{Time: now}, + CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceFailed, + Message: "fake-maintenance-message", + }, + }, + }, + { + name: "maintenance succeeded", + repo: &velerov1api.BackupRepository{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "repo", + }, + Spec: velerov1api.BackupRepositorySpec{ + MaintenanceFrequency: metav1.Duration{Duration: time.Hour}, + }, + Status: 
velerov1api.BackupRepositoryStatus{ + LastMaintenanceTime: &metav1.Time{Time: now.Add(-time.Hour - time.Minute)}, + RecentMaintenance: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + }, + startJobFunc: startMaintenanceJobSucceed, + waitJobFunc: waitMaintenanceJobCompleteFunc(now, velerov1api.BackupRepositoryMaintenanceSucceeded, ""), + expectedMaintenanceTime: now.Add(time.Hour), + expectedHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(-time.Hour * 2)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(-time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + { + StartTimestamp: &metav1.Time{Time: now}, + CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + reconciler := mockBackupRepoReconciler(t, "", test.repo, nil) + reconciler.clock = &fakeClock{now} + err := reconciler.Client.Create(context.TODO(), test.repo) + assert.NoError(t, err) + + funcStartMaintenanceJob = test.startJobFunc + funcWaitMaintenanceJobComplete = test.waitJobFunc + + err = reconciler.runMaintenanceIfDue(context.TODO(), test.repo, velerotest.NewLogger()) + if test.expectedErr == "" { + assert.NoError(t, err) + } + + assert.Equal(t, test.expectedMaintenanceTime, test.repo.Status.LastMaintenanceTime.Time) + assert.Len(t, test.repo.Status.RecentMaintenance, len(test.expectedHistory)) + + for i := 0; i < len(test.expectedHistory); i++ { + assert.Equal(t, test.expectedHistory[i].StartTimestamp.Time, test.repo.Status.RecentMaintenance[i].StartTimestamp.Time) + if test.expectedHistory[i].CompleteTimestamp == nil { + assert.Nil(t, test.repo.Status.RecentMaintenance[i].CompleteTimestamp) + } else { + assert.Equal(t, test.expectedHistory[i].CompleteTimestamp.Time, test.repo.Status.RecentMaintenance[i].CompleteTimestamp.Time) + } + + assert.Equal(t, test.expectedHistory[i].Result, test.repo.Status.RecentMaintenance[i].Result) + assert.Equal(t, test.expectedHistory[i].Message, test.repo.Status.RecentMaintenance[i].Message) + } + }) + } } func TestInitializeRepo(t *testing.T) { @@ -742,7 +950,7 @@ func TestUpdateRepoMaintenanceHistory(t *testing.T) { } func TestRecallMaintenance(t *testing.T) { - now := time.Now() + now := time.Now().Round(time.Second) schemeFail := runtime.NewScheme() velerov1api.AddToScheme(schemeFail) @@ -756,7 +964,7 @@ func TestRecallMaintenance(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "job1", Namespace: velerov1api.DefaultNamespace, - Labels: map[string]string{repository.RepositoryNameLabel: "repo"}, + Labels: map[string]string{maintenance.RepositoryNameLabel: "repo"}, CreationTimestamp: metav1.Time{Time: now.Add(time.Hour)}, }, Status: batchv1.JobStatus{ @@ -766,9 +974,9 @@ func TestRecallMaintenance(t *testing.T) { }, } - jobPodSucceeded := builder.ForPod(velerov1api.DefaultNamespace, "job1").Labels(map[string]string{"job-name": "job1"}).ContainerStatuses(&v1.ContainerStatus{ - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{}, + jobPodSucceeded := builder.ForPod(velerov1api.DefaultNamespace, "job1").Labels(map[string]string{"job-name": "job1"}).ContainerStatuses(&corev1.ContainerStatus{ + State: corev1.ContainerState{ + Terminated: 
&corev1.ContainerStateTerminated{}, }, }).Result() @@ -777,8 +985,8 @@ func TestRecallMaintenance(t *testing.T) { kubeClientObj []runtime.Object runtimeScheme *runtime.Scheme repoLastMatainTime metav1.Time - expectNewHistory bool - expectTimeUpdate bool + expectNewHistory []velerov1api.BackupRepositoryMaintenanceStatus + expectTimeUpdate *metav1.Time expectedErr string }{ { @@ -798,7 +1006,13 @@ func TestRecallMaintenance(t *testing.T) { jobPodSucceeded, }, repoLastMatainTime: metav1.Time{Time: now.Add(time.Hour * 5)}, - expectNewHistory: true, + expectNewHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(time.Hour)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, }, { name: "update last time", @@ -807,8 +1021,14 @@ func TestRecallMaintenance(t *testing.T) { jobSucceeded, jobPodSucceeded, }, - expectNewHistory: true, - expectTimeUpdate: true, + expectNewHistory: []velerov1api.BackupRepositoryMaintenanceStatus{ + { + StartTimestamp: &metav1.Time{Time: now.Add(time.Hour)}, + CompleteTimestamp: &metav1.Time{Time: now.Add(time.Hour * 2)}, + Result: velerov1api.BackupRepositoryMaintenanceSucceeded, + }, + }, + expectTimeUpdate: &metav1.Time{Time: now.Add(time.Hour * 2)}, }, } @@ -821,13 +1041,12 @@ func TestRecallMaintenance(t *testing.T) { test.kubeClientObj = append(test.kubeClientObj, backupRepo) - fakeClientBuilder := fake.NewClientBuilder() + fakeClientBuilder := clientFake.NewClientBuilder() fakeClientBuilder = fakeClientBuilder.WithScheme(test.runtimeScheme) fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() r.Client = fakeClient lastTm := backupRepo.Status.LastMaintenanceTime - history := backupRepo.Status.RecentMaintenance err := r.recallMaintenance(context.TODO(), backupRepo, velerotest.NewLogger()) if test.expectedErr != "" { @@ -835,12 +1054,22 @@ func TestRecallMaintenance(t *testing.T) { } else { assert.NoError(t, err) - if test.expectNewHistory { - assert.NotEqual(t, history, backupRepo.Status.RecentMaintenance) + if test.expectNewHistory == nil { + assert.Nil(t, backupRepo.Status.RecentMaintenance) + } else { + assert.Len(t, backupRepo.Status.RecentMaintenance, len(test.expectNewHistory)) + for i := 0; i < len(test.expectNewHistory); i++ { + assert.Equal(t, test.expectNewHistory[i].StartTimestamp.Time, backupRepo.Status.RecentMaintenance[i].StartTimestamp.Time) + assert.Equal(t, test.expectNewHistory[i].CompleteTimestamp.Time, backupRepo.Status.RecentMaintenance[i].CompleteTimestamp.Time) + assert.Equal(t, test.expectNewHistory[i].Result, backupRepo.Status.RecentMaintenance[i].Result) + assert.Equal(t, test.expectNewHistory[i].Message, backupRepo.Status.RecentMaintenance[i].Message) + } } - if test.expectTimeUpdate { - assert.NotEqual(t, lastTm, backupRepo.Status.LastMaintenanceTime) + if test.expectTimeUpdate != nil { + assert.Equal(t, test.expectTimeUpdate.Time, backupRepo.Status.LastMaintenanceTime.Time) + } else { + assert.Equal(t, lastTm, backupRepo.Status.LastMaintenanceTime) } } }) diff --git a/pkg/repository/maintenance.go b/pkg/repository/maintenance/maintenance.go similarity index 88% rename from pkg/repository/maintenance.go rename to pkg/repository/maintenance/maintenance.go index c1b4276a57..807a4113c7 100644 --- a/pkg/repository/maintenance.go +++ b/pkg/repository/maintenance/maintenance.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under 
the License. */ -package repository +package maintenance import ( "context" @@ -67,8 +67,8 @@ func GenerateJobName(repo string) string { return jobName } -// DeleteOldMaintenanceJobs deletes old maintenance jobs and keeps the latest N jobs -func DeleteOldMaintenanceJobs(cli client.Client, repo string, keep int) error { +// DeleteOldJobs deletes old maintenance jobs and keeps the latest N jobs +func DeleteOldJobs(cli client.Client, repo string, keep int) error { // Get the maintenance job list by label jobList := &batchv1.JobList{} err := cli.List(context.TODO(), jobList, client.MatchingLabels(map[string]string{RepositoryNameLabel: repo})) @@ -119,7 +119,7 @@ func waitForJobComplete(ctx context.Context, client client.Client, ns string, jo return ret, err } -func getMaintenanceResultFromJob(cli client.Client, job *batchv1.Job) (string, error) { +func getResultFromJob(cli client.Client, job *batchv1.Job) (string, error) { // Get the maintenance job related pod by label selector podList := &v1.PodList{} err := cli.List(context.TODO(), podList, client.InNamespace(job.Namespace), client.MatchingLabels(map[string]string{"job-name": job.Name})) @@ -148,7 +148,7 @@ func getMaintenanceResultFromJob(cli client.Client, job *batchv1.Job) (string, e return terminated.Message, nil } -// getMaintenanceJobConfig is called to get the Maintenance Job Config for the +// getJobConfig is called to get the Maintenance Job Config for the // BackupRepository specified by the repo parameter. // // Params: @@ -159,7 +159,7 @@ func getMaintenanceResultFromJob(cli client.Client, job *batchv1.Job) (string, e // veleroNamespace: the Velero-installed namespace. It's used to retrieve the BackupRepository. // repoMaintenanceJobConfig: the repository maintenance job ConfigMap name. // repo: the BackupRepository needs to run the maintenance Job. 
-func getMaintenanceJobConfig( +func getJobConfig( ctx context.Context, client client.Client, logger logrus.FieldLogger, @@ -241,8 +241,8 @@ func getMaintenanceJobConfig( return result, nil } -// WaitMaintenanceJobComplete waits the completion of the specified maintenance job and return the BackupRepositoryMaintenanceStatus -func WaitMaintenanceJobComplete(cli client.Client, ctx context.Context, jobName, ns string, logger logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) { +// WaitJobComplete waits the completion of the specified maintenance job and return the BackupRepositoryMaintenanceStatus +func WaitJobComplete(cli client.Client, ctx context.Context, jobName, ns string, logger logrus.FieldLogger) (velerov1api.BackupRepositoryMaintenanceStatus, error) { log := logger.WithField("job name", jobName) maintenanceJob, err := waitForJobComplete(ctx, cli, ns, jobName) @@ -252,17 +252,17 @@ func WaitMaintenanceJobComplete(cli client.Client, ctx context.Context, jobName, log.Info("Maintenance repo complete") - result, err := getMaintenanceResultFromJob(cli, maintenanceJob) + result, err := getResultFromJob(cli, maintenanceJob) if err != nil { log.WithError(err).Warn("Failed to get maintenance job result") } - return composeMaintenanceStatusFromJob(maintenanceJob, result), nil + return composeStatusFromJob(maintenanceJob, result), nil } -// WaitAllMaintenanceJobComplete checks all the incomplete maintenance jobs of the specified repo and wait for them to complete, +// WaitAllJobsComplete checks all the incomplete maintenance jobs of the specified repo and wait for them to complete, // and then return the maintenance jobs' status in the range of limit -func WaitAllMaintenanceJobComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) { +func WaitAllJobsComplete(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, limit int, log logrus.FieldLogger) ([]velerov1api.BackupRepositoryMaintenanceStatus, error) { jobList := &batchv1.JobList{} err := cli.List(context.TODO(), jobList, &client.ListOptions{ Namespace: repo.Namespace, @@ -303,19 +303,19 @@ func WaitAllMaintenanceJobComplete(ctx context.Context, cli client.Client, repo job = updated } - message, err := getMaintenanceResultFromJob(cli, job) + message, err := getResultFromJob(cli, job) if err != nil { return nil, errors.Wrapf(err, "error getting maintenance job[%s] result", job.Name) } - history = append(history, composeMaintenanceStatusFromJob(job, message)) + history = append(history, composeStatusFromJob(job, message)) } return history, nil } -// StartMaintenanceJob creates a new maintenance job -func StartMaintenanceJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, repoMaintenanceJobConfig string, +// StartNewJob creates a new maintenance job +func StartNewJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, repoMaintenanceJobConfig string, podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag, logger logrus.FieldLogger) (string, error) { bsl := &velerov1api.BackupStorageLocation{} if err := cli.Get(ctx, client.ObjectKey{Namespace: repo.Namespace, Name: repo.Spec.BackupStorageLocation}, bsl); err != nil { @@ -329,7 +329,7 @@ func StartMaintenanceJob(cli client.Client, ctx context.Context, repo *velerov1a "repo UID": repo.UID, }) - jobConfig, err := getMaintenanceJobConfig( + jobConfig, err 
:= getJobConfig( ctx, cli, log, @@ -346,7 +346,7 @@ func StartMaintenanceJob(cli client.Client, ctx context.Context, repo *velerov1a log.Info("Starting maintenance repo") - maintenanceJob, err := buildMaintenanceJob(cli, ctx, repo, bsl.Name, jobConfig, podResources, logLevel, logFormat) + maintenanceJob, err := buildJob(cli, ctx, repo, bsl.Name, jobConfig, podResources, logLevel, logFormat) if err != nil { return "", errors.Wrap(err, "error to build maintenance job") } @@ -362,7 +362,7 @@ func StartMaintenanceJob(cli client.Client, ctx context.Context, repo *velerov1a return maintenanceJob.Name, nil } -func buildMaintenanceJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs, +func buildJob(cli client.Client, ctx context.Context, repo *velerov1api.BackupRepository, bslName string, config *JobConfigs, podResources kube.PodResources, logLevel logrus.Level, logFormat *logging.FormatFlag) (*batchv1.Job, error) { // Get the Velero server deployment deployment := &appsv1.Deployment{} @@ -479,7 +479,7 @@ func buildMaintenanceJob(cli client.Client, ctx context.Context, repo *velerov1a return job, nil } -func composeMaintenanceStatusFromJob(job *batchv1.Job, message string) velerov1api.BackupRepositoryMaintenanceStatus { +func composeStatusFromJob(job *batchv1.Job, message string) velerov1api.BackupRepositoryMaintenanceStatus { result := velerov1api.BackupRepositoryMaintenanceSucceeded if job.Status.Failed > 0 { result = velerov1api.BackupRepositoryMaintenanceFailed diff --git a/pkg/repository/maintenance_test.go b/pkg/repository/maintenance/maintenance_test.go similarity index 97% rename from pkg/repository/maintenance_test.go rename to pkg/repository/maintenance/maintenance_test.go index 6d04a7837a..1854aa0f9f 100644 --- a/pkg/repository/maintenance_test.go +++ b/pkg/repository/maintenance/maintenance_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package repository +package maintenance import ( "context" @@ -76,7 +76,7 @@ func TestGenerateJobName1(t *testing.T) { }) } } -func TestDeleteOldMaintenanceJobs(t *testing.T) { +func TestDeleteOldJobs(t *testing.T) { // Set up test repo and keep value repo := "test-repo" keep := 2 @@ -114,7 +114,7 @@ func TestDeleteOldMaintenanceJobs(t *testing.T) { cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() // Call the function - err := DeleteOldMaintenanceJobs(cli, repo, keep) + err := DeleteOldJobs(cli, repo, keep) assert.NoError(t, err) // Get the remaining jobs @@ -208,7 +208,7 @@ func TestWaitForJobComplete(t *testing.T) { } } -func TestGetMaintenanceResultFromJob(t *testing.T) { +func TestGetResultFromJob(t *testing.T) { // Set up test job job := &batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ @@ -230,7 +230,7 @@ func TestGetMaintenanceResultFromJob(t *testing.T) { cli := fake.NewClientBuilder().WithObjects(job, pod).Build() // test an error should be returned - result, err := getMaintenanceResultFromJob(cli, job) + result, err := getResultFromJob(cli, job) assert.Error(t, err) assert.Equal(t, "", result) @@ -245,7 +245,7 @@ func TestGetMaintenanceResultFromJob(t *testing.T) { // Test an error should be returned cli = fake.NewClientBuilder().WithObjects(job, pod).Build() - result, err = getMaintenanceResultFromJob(cli, job) + result, err = getResultFromJob(cli, job) assert.Error(t, err) assert.Equal(t, "", result) @@ -264,12 +264,12 @@ func TestGetMaintenanceResultFromJob(t *testing.T) { // This call should return the termination message with no error cli = fake.NewClientBuilder().WithObjects(job, pod).Build() - result, err = getMaintenanceResultFromJob(cli, job) + result, err = getResultFromJob(cli, job) assert.NoError(t, err) assert.Equal(t, "test message", result) } -func TestGetMaintenanceJobConfig(t *testing.T) { +func TestGetJobConfig(t *testing.T) { ctx := context.Background() logger := logrus.New() veleroNamespace := "velero" @@ -425,7 +425,7 @@ func TestGetMaintenanceJobConfig(t *testing.T) { fakeClient = velerotest.NewFakeControllerRuntimeClient(t) } - jobConfig, err := getMaintenanceJobConfig( + jobConfig, err := getJobConfig( ctx, fakeClient, logger, @@ -444,7 +444,7 @@ func TestGetMaintenanceJobConfig(t *testing.T) { } } -func TestWaitAllMaintenanceJobComplete(t *testing.T) { +func TestWaitAlJobsComplete(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) veleroNamespace := "velero" @@ -712,7 +712,7 @@ func TestWaitAllMaintenanceJobComplete(t *testing.T) { fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() - history, err := WaitAllMaintenanceJobComplete(test.ctx, fakeClient, repo, 3, velerotest.NewLogger()) + history, err := WaitAllJobsComplete(test.ctx, fakeClient, repo, 3, velerotest.NewLogger()) if test.expectedError != "" { assert.EqualError(t, err, test.expectedError) @@ -733,7 +733,7 @@ func TestWaitAllMaintenanceJobComplete(t *testing.T) { cancel() } -func TestBuildMaintenanceJob(t *testing.T) { +func TestBuildJob(t *testing.T) { testCases := []struct { name string m *JobConfigs @@ -872,7 +872,7 @@ func TestBuildMaintenanceJob(t *testing.T) { cli := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() // Call the function to test - job, err := buildMaintenanceJob(cli, context.TODO(), param.BackupRepo, param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat) + job, err := buildJob(cli, context.TODO(), param.BackupRepo, 
param.BackupLocation.Name, tc.m, *tc.m.PodResources, tc.logLevel, tc.logFormat) // Check the error if tc.expectedError {
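
The logger change in pkg/cmd/cli/repomantenance/maintenance.go above removes the stand-alone dev-mode zap logger (and a leftover one-minute sleep) and instead bridges the command's existing logrus logger into controller-runtime's logr interface. Below is a minimal sketch of that wiring, assuming only the logrus, logrusr/v3, and controller-runtime calls that appear in the diff; the surrounding Options.Run plumbing is omitted.

    package main

    import (
        "os"

        "github.com/bombsimon/logrusr/v3"
        "github.com/sirupsen/logrus"
        ctrl "sigs.k8s.io/controller-runtime"
    )

    func main() {
        // The maintenance command already builds a logrus logger; a plain one
        // stands in for logging.DefaultLogger in this sketch.
        logger := logrus.New()
        logger.SetOutput(os.Stdout)

        // Route controller-runtime (and client-go) logging through the same
        // logrus sink instead of a separate dev-mode zap logger.
        ctrl.SetLogger(logrusr.New(logger))
    }

With this in place, controller-runtime output follows the log level and format flags the command already parses, rather than a second, differently formatted stream.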
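
The controller changes replace the repository.* maintenance helpers with the new pkg/repository/maintenance package (StartNewJob, WaitJobComplete, WaitAllJobsComplete, DeleteOldJobs). The sketch below shows a caller driving that renamed API end to end; the function signatures are taken from the diff, while the runMaintenanceOnce wrapper, the logging.NewFormatFlag helper, the empty ConfigMap name, and the keep value of 3 are illustrative assumptions rather than code from this change.

    // maintenancedemo is a hypothetical package for this sketch, not part of the PR.
    package maintenancedemo

    import (
        "context"

        "github.com/pkg/errors"
        "github.com/sirupsen/logrus"
        "sigs.k8s.io/controller-runtime/pkg/client"

        velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
        "github.com/vmware-tanzu/velero/pkg/repository/maintenance"
        "github.com/vmware-tanzu/velero/pkg/util/kube"
        "github.com/vmware-tanzu/velero/pkg/util/logging"
    )

    // runMaintenanceOnce is an illustrative caller, not the reconciler's
    // runMaintenanceIfDue; only the maintenance.* signatures come from the diff.
    func runMaintenanceOnce(ctx context.Context, cli client.Client, repo *velerov1api.BackupRepository, log logrus.FieldLogger) error {
        // Start a maintenance job for the repository. The empty string stands in
        // for the optional repo-maintenance-job ConfigMap name.
        jobName, err := maintenance.StartNewJob(cli, ctx, repo, "", kube.PodResources{}, logrus.InfoLevel, logging.NewFormatFlag(), log)
        if err != nil {
            return errors.Wrap(err, "error starting repo maintenance job")
        }

        // Block until the job finishes and read its result back as the
        // BackupRepositoryMaintenanceStatus consumed by the controller.
        status, err := maintenance.WaitJobComplete(cli, ctx, jobName, repo.Namespace, log)
        if err != nil {
            return errors.Wrap(err, "error waiting repo maintenance completion status")
        }
        log.WithField("result", status.Result).Info("repo maintenance finished")

        // Trim history, keeping only the latest few jobs for this repository,
        // as the reconciler does after each run (3 is arbitrary here).
        return maintenance.DeleteOldJobs(cli, repo.Name, 3)
    }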