From 6d320096dcfeff3694e58ad238b8084824546869 Mon Sep 17 00:00:00 2001 From: shaojiang Date: Tue, 4 Jul 2023 17:47:35 +0800 Subject: [PATCH] feat: add dataprotection isolation deployment (#4074) --- cmd/manager/main.go | 354 ++++++++++-------- controllers/apps/cluster_controller.go | 11 +- controllers/apps/cluster_controller_test.go | 5 +- .../apps/components/base_stateful_hscale.go | 120 ++---- .../apps/opsrequest_controller_test.go | 48 ++- deploy/helm/templates/deployment.yaml | 180 ++++++++- deploy/helm/values.yaml | 7 + internal/cli/cmd/cluster/dataprotection.go | 1 - internal/cli/cmd/kubeblocks/compare_test.go | 5 +- internal/cli/cmd/kubeblocks/config.go | 23 -- internal/cli/util/version.go | 6 + internal/controller/builder/builder.go | 14 - internal/controller/builder/builder_test.go | 12 - .../builder/cue/snapshot_template.cue | 48 --- 14 files changed, 446 insertions(+), 388 deletions(-) delete mode 100644 internal/controller/builder/cue/snapshot_template.cue diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 59703d4eece..0612c60f7cc 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -116,9 +116,17 @@ func init() { type flagName string const ( - probeAddrFlagKey flagName = "health-probe-bind-address" - metricsAddrFlagKey flagName = "metrics-bind-address" - leaderElectFlagKey flagName = "leader-elect" + probeAddrFlagKey flagName = "health-probe-bind-address" + metricsAddrFlagKey flagName = "metrics-bind-address" + leaderElectFlagKey flagName = "leader-elect" + leaderElectIDFlagKey flagName = "leader-elect-id" + + // switch flags key for API groups + appsFlagKey flagName = "apps" + dataProtectionFlagKey flagName = "dataprotection" + extensionsFlagKey flagName = "extensions" + workloadsFlagKey flagName = "workloads" + storageFlagKey flagName = "storage" ) func (r flagName) String() string { @@ -175,6 +183,7 @@ func validateRequiredToParseConfigs() error { func main() { var metricsAddr string var enableLeaderElection bool + var enableLeaderElectionID string var probeAddr string flag.String(metricsAddrFlagKey.String(), ":8080", "The address the metric endpoint binds to.") flag.String(probeAddrFlagKey.String(), ":8081", "The address the probe endpoint binds to.") @@ -182,6 +191,21 @@ func main() { "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") + flag.String(leaderElectIDFlagKey.String(), "001c317f", + "The leader election ID prefix for controller manager. "+ + "This ID must be unique to controller manager.") + + flag.Bool(appsFlagKey.String(), true, + "Enable the apps controller manager.") + flag.Bool(dataProtectionFlagKey.String(), true, + "Enable the dataprotection controller manager. ") + flag.Bool(extensionsFlagKey.String(), true, + "Enable the extensions controller manager. ") + flag.Bool(workloadsFlagKey.String(), true, + "Enable the workloads controller manager. ") + flag.Bool(storageFlagKey.String(), true, + "Enable the storage controller manager. 
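// Taken together, these switches let one binary act either as the full
// controller manager or as a single-purpose instance. For example, the
// dataprotection-only deployment added later in this patch starts the
// manager roughly as:
//
//	manager --apps=false --extensions=false --workloads=false \
//	        --storage=false --dataprotection=true \
//	        --leader-elect --leader-elect-id=abd03fda
//
// The --leader-elect-id value only needs to differ from the apps instance's
// default ("001c317f") so that the two deployments hold separate leases.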
") + opts := zap.Options{ Development: true, } @@ -220,6 +244,7 @@ func main() { metricsAddr = viper.GetString(metricsAddrFlagKey.viperName()) probeAddr = viper.GetString(probeAddrFlagKey.viperName()) enableLeaderElection = viper.GetBool(leaderElectFlagKey.viperName()) + enableLeaderElectionID = viper.GetString(leaderElectIDFlagKey.viperName()) setupLog.Info(fmt.Sprintf("config settings: %v", viper.AllSettings())) if err := validateRequiredToParseConfigs(); err != nil { @@ -237,7 +262,7 @@ func main() { // following LeaderElectionID is generated via hash/fnv (FNV-1 and FNV-1a), in // pattern of '{{ hashFNV .Repo }}.{{ .Domain }}', make sure regenerate this ID // if you have forked from this project template. - LeaderElectionID: "001c317f.kubeblocks.io", + LeaderElectionID: enableLeaderElectionID + ".kubeblocks.io", // NOTES: // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily @@ -260,189 +285,198 @@ func main() { os.Exit(1) } - if err = (&appscontrollers.ClusterReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cluster-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Cluster") - os.Exit(1) - } + if viper.GetBool(appsFlagKey.viperName()) { + if err = (&appscontrollers.ClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("cluster-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Cluster") + os.Exit(1) + } - if err = (&appscontrollers.ClusterDefinitionReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cluster-definition-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ClusterDefinition") - os.Exit(1) - } + if err = (&appscontrollers.ClusterDefinitionReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("cluster-definition-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterDefinition") + os.Exit(1) + } - if err = (&appscontrollers.ClusterVersionReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cluster-version-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ClusterVersion") - os.Exit(1) - } + if err = (&appscontrollers.ClusterVersionReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("cluster-version-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterVersion") + os.Exit(1) + } - if err = (&dataprotectioncontrollers.BackupToolReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-tool-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BackupTool") - os.Exit(1) - } + if err = (&appscontrollers.OpsRequestReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("ops-request-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "OpsRequest") + os.Exit(1) + } - if err = 
(&dataprotectioncontrollers.BackupPolicyReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-policy-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BackupPolicy") - os.Exit(1) - } + if err = (&configuration.ConfigConstraintReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("config-constraint-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ConfigConstraint") + os.Exit(1) + } - if err = (&dataprotectioncontrollers.CronJobReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("cronjob-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "CronJob") - os.Exit(1) - } + if err = (&configuration.ReconfigureRequestReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("reconfigure-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ReconfigureRequest") + os.Exit(1) + } - if err = (&dataprotectioncontrollers.BackupReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Backup") - os.Exit(1) - } + if err = (&appscontrollers.SystemAccountReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("system-account-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "SystemAccount") + os.Exit(1) + } - if err = (&dataprotectioncontrollers.RestoreJobReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("restore-job-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "RestoreJob") - os.Exit(1) - } + if err = (&k8scorecontrollers.EventReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("event-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Event") + os.Exit(1) + } - if err = (&dataprotectioncontrollers.BackupRepoReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("backup-repo-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BackupRepo") - os.Exit(1) - } + if err = (&k8scorecontrollers.PersistentVolumeClaimReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("pvc-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "PersistentVolumeClaim") + os.Exit(1) + } - if err = (&appscontrollers.OpsRequestReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("ops-request-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "OpsRequest") - os.Exit(1) - } + if err = components.NewStatefulSetReconciler(mgr); err != nil { + setupLog.Error(err, "unable to create 
controller", "controller", "StatefulSet") + os.Exit(1) + } - if err = (&configuration.ConfigConstraintReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("config-constraint-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ConfigConstraint") - os.Exit(1) - } - if !viper.GetBool("DISABLE_ADDON_CTRLER") { - if err = (&extensionscontrollers.AddonReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("addon-controller"), - RestConfig: mgr.GetConfig(), + if err = components.NewDeploymentReconciler(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Deployment") + os.Exit(1) + } + + if err = (&appscontrollers.ComponentClassReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("class-controller"), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Addon") + setupLog.Error(err, "unable to create controller", "controller", "Class") os.Exit(1) } } - if err = (&workloadscontrollers.ReplicatedStateMachineReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("replicated-state-machine-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ReplicatedStateMachine") - os.Exit(1) - } + if viper.GetBool(dataProtectionFlagKey.viperName()) { + if err = (&dataprotectioncontrollers.BackupToolReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("backup-tool-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BackupTool") + os.Exit(1) + } - if err = (&storagecontrollers.StorageProviderReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("storage-provider-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "StorageProvider") - os.Exit(1) - } - // +kubebuilder:scaffold:builder + if err = (&dataprotectioncontrollers.BackupPolicyReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("backup-policy-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BackupPolicy") + os.Exit(1) + } - if err = (&configuration.ReconfigureRequestReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("reconfigure-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ReconfigureRequest") - os.Exit(1) - } + if err = (&dataprotectioncontrollers.CronJobReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("cronjob-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "CronJob") + os.Exit(1) + } - if err = (&appscontrollers.SystemAccountReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("system-account-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "SystemAccount") - os.Exit(1) - } + if err = 
(&dataprotectioncontrollers.BackupReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("backup-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Backup") + os.Exit(1) + } - if err = (&k8scorecontrollers.EventReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("event-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Event") - os.Exit(1) - } + if err = (&dataprotectioncontrollers.RestoreJobReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("restore-job-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "RestoreJob") + os.Exit(1) + } - if err = (&k8scorecontrollers.PersistentVolumeClaimReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("pvc-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "PersistentVolumeClaim") - os.Exit(1) + if err = (&dataprotectioncontrollers.BackupRepoReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("backup-repo-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "BackupRepo") + os.Exit(1) + } } - if err = components.NewStatefulSetReconciler(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "StatefulSet") - os.Exit(1) + if viper.GetBool(extensionsFlagKey.viperName()) { + if err = (&extensionscontrollers.AddonReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("addon-controller"), + RestConfig: mgr.GetConfig(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Addon") + os.Exit(1) + } } - if err = components.NewDeploymentReconciler(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Deployment") - os.Exit(1) + if viper.GetBool(workloadsFlagKey.viperName()) { + if err = (&workloadscontrollers.ReplicatedStateMachineReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("replicated-state-machine-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ReplicatedStateMachine") + os.Exit(1) + } } - if err = (&appscontrollers.ComponentClassReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("class-controller"), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Class") - os.Exit(1) + if viper.GetBool(storageFlagKey.viperName()) { + if err = (&storagecontrollers.StorageProviderReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Recorder: mgr.GetEventRecorderFor("storage-provider-controller"), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "StorageProvider") + os.Exit(1) + } } + // +kubebuilder:scaffold:builder if viper.GetBool("enable_webhooks") { diff --git a/controllers/apps/cluster_controller.go b/controllers/apps/cluster_controller.go index bfc5f402f13..3740040f7da 100644 --- 
a/controllers/apps/cluster_controller.go +++ b/controllers/apps/cluster_controller.go @@ -23,8 +23,6 @@ import ( "context" "time" - snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "github.com/spf13/viper" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -34,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" @@ -220,13 +217,7 @@ func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&dataprotectionv1alpha1.Backup{}). Owns(&batchv1.Job{}). Watches(&source.Kind{Type: &corev1.Pod{}}, handler.EnqueueRequestsFromMapFunc(r.filterClusterPods)) - if viper.GetBool("VOLUMESNAPSHOT") { - if intctrlutil.InVolumeSnapshotV1Beta1() { - b.Owns(&snapshotv1beta1.VolumeSnapshot{}, builder.Predicates{}) - } else { - b.Owns(&snapshotv1.VolumeSnapshot{}, builder.Predicates{}) - } - } + return b.Complete(r) } diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 74c78562cf5..41dc5d8d6ef 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -542,9 +542,7 @@ var _ = Describe("Cluster Controller", func() { Name: backupKey.Name, Namespace: backupKey.Namespace, Labels: map[string]string{ - constant.KBManagedByKey: "cluster", - constant.AppInstanceLabelKey: clusterKey.Name, - constant.KBAppComponentLabelKey: comp.Name, + constant.DataProtectionLabelBackupNameKey: backupKey.Name, }}, Spec: snapshotv1.VolumeSnapshotSpec{ Source: snapshotv1.VolumeSnapshotSource{ @@ -610,7 +608,6 @@ var _ = Describe("Cluster Controller", func() { constant.AppInstanceLabelKey: clusterKey.Name, constant.KBAppComponentLabelKey: comp.Name, }, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(0)) - Eventually(testapps.CheckObjExists(&testCtx, backupKey, &snapshotv1.VolumeSnapshot{}, false)).Should(Succeed()) if !viper.GetBool("VOLUMESNAPSHOT") && len(viper.GetString(constant.CfgKeyBackupPVCName)) > 0 { By("Checking restore job cleanup") diff --git a/controllers/apps/components/base_stateful_hscale.go b/controllers/apps/components/base_stateful_hscale.go index f2c0cd33f3a..c19dccae6e9 100644 --- a/controllers/apps/components/base_stateful_hscale.go +++ b/controllers/apps/components/base_stateful_hscale.go @@ -21,7 +21,6 @@ package components import ( "fmt" - "strings" snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "github.com/spf13/viper" @@ -272,18 +271,8 @@ func (d *snapshotDataClone) backup() ([]client.Object, error) { backupPolicyTemplate := &appsv1alpha1.BackupPolicyTemplate{} err := d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backupPolicyTplName}, backupPolicyTemplate) - if err != nil && !errors.IsNotFound(err) { - return nil, err - } - // no backuppolicytemplate, then try native volumesnapshot if err != nil { - pvcName := strings.Join([]string{d.backupVCT().Name, d.stsObj.Name, "0"}, "-") - snapshot, err := builder.BuildVolumeSnapshot(d.key, pvcName, d.stsObj) - if err != nil { - return nil, err - } - d.reqCtx.Eventf(d.cluster, corev1.EventTypeNormal, "VolumeSnapshotCreate", "Create volumesnapshot/%s", d.key.Name) - return []client.Object{snapshot}, 
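// Note the behavioral change in backup(): previously a NotFound on the
// BackupPolicyTemplate fell back to building a raw VolumeSnapshot against the
// first replica's PVC; after this patch any Get error, including NotFound,
// aborts the clone, so a BackupPolicyTemplate is now required for
// snapshot-based horizontal scaling.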
nil + return nil, err } // if there is backuppolicytemplate created by provider @@ -304,49 +293,26 @@ func (d *snapshotDataClone) backup() ([]client.Object, error) { } func (d *snapshotDataClone) checkBackupStatus() (backupStatus, error) { - hasBackupPolicyTemplate := true backupPolicyTplName := d.component.HorizontalScalePolicy.BackupPolicyTemplateName backupPolicyTemplate := &appsv1alpha1.BackupPolicyTemplate{} err := d.cli.Get(d.reqCtx.Ctx, client.ObjectKey{Name: backupPolicyTplName}, backupPolicyTemplate) - if err != nil && !errors.IsNotFound(err) { + if err != nil { return backupStatusFailed, err } - if errors.IsNotFound(err) { - hasBackupPolicyTemplate = false - } - // if no backuppolicytemplate, do not check backup - if hasBackupPolicyTemplate { - backup := dataprotectionv1alpha1.Backup{} - if err := d.cli.Get(d.reqCtx.Ctx, d.key, &backup); err != nil { - if errors.IsNotFound(err) { - return backupStatusNotCreated, nil - } else { - return backupStatusFailed, err - } - } - if backup.Status.Phase == dataprotectionv1alpha1.BackupFailed { - return backupStatusFailed, intctrlutil.NewErrorf(intctrlutil.ErrorTypeBackupFailed, "backup for horizontalScaling failed: %s", - backup.Status.FailureReason) - } - if backup.Status.Phase != dataprotectionv1alpha1.BackupCompleted { - return backupStatusProcessing, nil - } - } else { - vsExists, err := d.isVolumeSnapshotExists() - if err != nil { - return backupStatusFailed, err - } - if !vsExists { + backup := dataprotectionv1alpha1.Backup{} + if err := d.cli.Get(d.reqCtx.Ctx, d.key, &backup); err != nil { + if errors.IsNotFound(err) { return backupStatusNotCreated, nil - } - // volumesnapshot exists, check if it is ready for use. - ready, err := d.isVolumeSnapshotReadyToUse() - if err != nil { + } else { return backupStatusFailed, err } - if !ready { - return backupStatusProcessing, nil - } + } + if backup.Status.Phase == dataprotectionv1alpha1.BackupFailed { + return backupStatusFailed, intctrlutil.NewErrorf(intctrlutil.ErrorTypeBackupFailed, "backup for horizontalScaling failed: %s", + backup.Status.FailureReason) + } + if backup.Status.Phase != dataprotectionv1alpha1.BackupCompleted { + return backupStatusProcessing, nil } return backupStatusReadyToUse, nil } @@ -377,43 +343,19 @@ func (d *snapshotDataClone) checkRestoreStatus(pvcKey types.NamespacedName) (bac return backupStatusReadyToUse, nil } -// check snapshot existence -func (d *snapshotDataClone) isVolumeSnapshotExists() (bool, error) { - ml := d.getBackupMatchingLabels() - vsList := snapshotv1.VolumeSnapshotList{} - compatClient := intctrlutil.VolumeSnapshotCompatClient{ReadonlyClient: d.cli, Ctx: d.reqCtx.Ctx} - if err := compatClient.List(&vsList, ml); err != nil { - return false, client.IgnoreNotFound(err) - } - for _, vs := range vsList.Items { - // when do h-scale very shortly after last h-scale, - // the last volume snapshot could not be deleted completely - if vs.DeletionTimestamp.IsZero() { - return true, nil - } - } - return false, nil -} - -// check snapshot ready to use -func (d *snapshotDataClone) isVolumeSnapshotReadyToUse() (bool, error) { - ml := d.getBackupMatchingLabels() - vsList := snapshotv1.VolumeSnapshotList{} +func (d *snapshotDataClone) listVolumeSnapshotByLabels(vsList *snapshotv1.VolumeSnapshotList, ml client.MatchingLabels) error { compatClient := intctrlutil.VolumeSnapshotCompatClient{ReadonlyClient: d.cli, Ctx: d.reqCtx.Ctx} - if err := compatClient.List(&vsList, ml); err != nil { - return false, client.IgnoreNotFound(err) - } - if len(vsList.Items) == 0 || 
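// Both removed checks are superseded: backup readiness is now read from the
// Backup resource's status.phase in checkBackupStatus above, and snapshot
// lookup goes through listVolumeSnapshotByLabels below, which first resolves
// the owning Backup by the component's matching labels and then lists
// snapshots by the backup-name label that the backup controller stamps on them.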
vsList.Items[0].Status == nil { - return false, nil - } - status := vsList.Items[0].Status - if status.Error != nil { - return false, fmt.Errorf("VolumeSnapshot/" + vsList.Items[0].Name + ": " + *status.Error.Message) - } - if status.ReadyToUse == nil { - return false, nil + // get vs from backup. + backupList := dataprotectionv1alpha1.BackupList{} + if err := d.cli.List(d.reqCtx.Ctx, &backupList, ml); err != nil { + return err + } else if len(backupList.Items) == 0 { + // ignore not found + return nil } - return *status.ReadyToUse, nil + return compatClient.List(vsList, client.MatchingLabels{ + constant.DataProtectionLabelBackupNameKey: backupList.Items[0].Name, + }) } func (d *snapshotDataClone) checkedCreatePVCFromSnapshot(pvcKey types.NamespacedName, @@ -426,8 +368,7 @@ func (d *snapshotDataClone) checkedCreatePVCFromSnapshot(pvcKey types.Namespaced } ml := d.getBackupMatchingLabels() vsList := snapshotv1.VolumeSnapshotList{} - compatClient := intctrlutil.VolumeSnapshotCompatClient{ReadonlyClient: d.cli, Ctx: d.reqCtx.Ctx} - if err := compatClient.List(&vsList, ml); err != nil { + if err = d.listVolumeSnapshotByLabels(&vsList, ml); err != nil { return nil, err } if len(vsList.Items) == 0 { @@ -466,17 +407,6 @@ func (d *snapshotDataClone) deleteSnapshot() ([]client.Object, error) { if len(objs) > 0 { d.reqCtx.Recorder.Eventf(d.cluster, corev1.EventTypeNormal, "BackupJobDelete", "Delete backupJob/%s", d.key.Name) } - // delete volumesnapshot separately since backup may not exist if backuppolicytemplate not configured - compatClient := intctrlutil.VolumeSnapshotCompatClient{ReadonlyClient: d.cli, Ctx: d.reqCtx.Ctx} - vs := &snapshotv1.VolumeSnapshot{} - err = compatClient.Get(d.key, vs) - if err != nil && !errors.IsNotFound(err) { - return nil, err - } - if err == nil { - objs = append(objs, vs) - d.reqCtx.Recorder.Eventf(d.cluster, corev1.EventTypeNormal, "VolumeSnapshotDelete", "Delete volumeSnapshot/%s", d.key.Name) - } return objs, nil } diff --git a/controllers/apps/opsrequest_controller_test.go b/controllers/apps/opsrequest_controller_test.go index 1dc338ebe67..7531bd84318 100644 --- a/controllers/apps/opsrequest_controller_test.go +++ b/controllers/apps/opsrequest_controller_test.go @@ -23,10 +23,10 @@ import ( "fmt" "time" + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" "github.com/spf13/viper" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1" + dataprotectionv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1" opsutil "github.com/apecloud/kubeblocks/controllers/apps/operations/util" "github.com/apecloud/kubeblocks/internal/constant" intctrlutil "github.com/apecloud/kubeblocks/internal/generics" @@ -359,11 +360,13 @@ var _ = Describe("OpsRequest Controller", func() { By("set component to horizontal with snapshot policy and create a cluster") viper.Set("VOLUMESNAPSHOT", true) - Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), - func(clusterDef *appsv1alpha1.ClusterDefinition) { - clusterDef.Spec.ComponentDefs[0].HorizontalScalePolicy = - &appsv1alpha1.HorizontalScalePolicy{Type: appsv1alpha1.HScaleDataClonePolicyCloneVolume} - })()).ShouldNot(HaveOccurred()) + if clusterDefObj.Spec.ComponentDefs[0].HorizontalScalePolicy == nil { + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(clusterDefObj), + func(clusterDef *appsv1alpha1.ClusterDefinition) { + clusterDef.Spec.ComponentDefs[0].HorizontalScalePolicy = + &appsv1alpha1.HorizontalScalePolicy{Type: appsv1alpha1.HScaleDataClonePolicyCloneVolume} + })()).ShouldNot(HaveOccurred()) + } pvcSpec := testapps.NewPVCSpec("1Gi") clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterNamePrefix, clusterDefObj.Name, clusterVersionObj.Name).WithRandomName(). @@ -484,19 +487,34 @@ var _ = Describe("OpsRequest Controller", func() { g.Expect(cluster.Status.Phase).Should(Equal(appsv1alpha1.SpecReconcilingClusterPhase)) })).Should(Succeed()) - By("mock VolumeSnapshot status is ready, component phase should change to Updating when component is horizontally scaling.") - snapshotKey := types.NamespacedName{Name: fmt.Sprintf("%s-%s-scaling", + By("mock backup status is ready, component phase should change to Updating when component is horizontally scaling.") + backupKey := types.NamespacedName{Name: fmt.Sprintf("%s-%s-scaling", clusterKey.Name, mysqlCompName), Namespace: testCtx.DefaultNamespace} - volumeSnapshot := &snapshotv1.VolumeSnapshot{} - Expect(k8sClient.Get(testCtx.Ctx, snapshotKey, volumeSnapshot)).Should(Succeed()) - readyToUse := true - volumeSnapshot.Status = &snapshotv1.VolumeSnapshotStatus{ReadyToUse: &readyToUse} - Expect(k8sClient.Status().Update(testCtx.Ctx, volumeSnapshot)).Should(Succeed()) + backup := &dataprotectionv1alpha1.Backup{} + Expect(k8sClient.Get(testCtx.Ctx, backupKey, backup)).Should(Succeed()) + backup.Status.Phase = dataprotectionv1alpha1.BackupCompleted + Expect(k8sClient.Status().Update(testCtx.Ctx, backup)).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, cluster *appsv1alpha1.Cluster) { g.Expect(cluster.Status.Components[mysqlCompName].Phase).Should(Equal(appsv1alpha1.SpecReconcilingClusterCompPhase)) g.Expect(cluster.Status.Phase).Should(Equal(appsv1alpha1.SpecReconcilingClusterPhase)) })).Should(Succeed()) + By("mock create volumesnapshot, which should done by backup controller") + vs := &snapshotv1.VolumeSnapshot{} + vs.Name = backupKey.Name + vs.Namespace = backupKey.Namespace + vs.Labels = map[string]string{ + constant.DataProtectionLabelBackupNameKey: backupKey.Name, + } + pvcName := "" + vs.Spec = 
snapshotv1.VolumeSnapshotSpec{ + Source: snapshotv1.VolumeSnapshotSource{ + PersistentVolumeClaimName: &pvcName, + }, + } + Expect(k8sClient.Create(testCtx.Ctx, vs)).Should(Succeed()) + Eventually(testapps.CheckObjExists(&testCtx, backupKey, vs, true)).Should(Succeed()) + By("check the underlying workload been updated") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), func(g Gomega, sts *appsv1.StatefulSet) { @@ -521,8 +539,8 @@ var _ = Describe("OpsRequest Controller", func() { })()).Should(Succeed()) } - By("check the volumesnapshot created for scaling has been deleted") - Eventually(testapps.CheckObjExists(&testCtx, snapshotKey, volumeSnapshot, false)).Should(Succeed()) + By("check the backup created for scaling has been deleted") + Eventually(testapps.CheckObjExists(&testCtx, backupKey, backup, false)).Should(Succeed()) By("mock component workload is running and expect cluster and component are running") mockCompRunning(replicas) diff --git a/deploy/helm/templates/deployment.yaml b/deploy/helm/templates/deployment.yaml index fae23b97074..ce24de8f524 100644 --- a/deploy/helm/templates/deployment.yaml +++ b/deploy/helm/templates/deployment.yaml @@ -4,6 +4,7 @@ metadata: name: {{ include "kubeblocks.fullname" . }} labels: {{- include "kubeblocks.labels" . | nindent 4 }} + app.kubernetes.io/component: "apps" spec: {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicaCount }} @@ -52,6 +53,11 @@ spec: {{- with .Values.loggerSettings.encoder }} - "--zap-encoder={{ . }}" {{- end }} + - "--extensions={{- default "true" ( include "kubeblocks.addonControllerEnabled" . ) }}" + - "--apps=true" + - "--workloads=true" + - "--storage=true" + - "--dataprotection={{- and .Values.dataProtection.enabled (not .Values.separatedDeployment) }}" env: - name: CM_NAMESPACE value: {{ .Release.Namespace }} @@ -85,10 +91,7 @@ spec: - name: ENABLE_WEBHOOKS value: "true" {{- end }} - {{- if ( include "kubeblocks.addonControllerEnabled" . ) | deepEqual "false" }} - - name: DISABLE_ADDON_CTRLER - value: "true" - {{- else }} + {{- if ( include "kubeblocks.addonControllerEnabled" . ) | deepEqual "true" }} - name: ADDON_JOB_TTL value: {{ .jobTTL | quote }} - name: ADDON_JOB_IMAGE_PULL_POLICY @@ -173,3 +176,172 @@ spec: defaultMode: 420 secretName: {{ include "kubeblocks.fullname" . }}.{{ .Release.Namespace }}.svc.tls-pair {{- end }} + +{{- if and .Values.separatedDeployment .Values.dataProtection.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kubeblocks.fullname" . }}-dataprotection + labels: + {{- include "kubeblocks.labels" . | nindent 4 }} + app.kubernetes.io/component: "dataprotection" +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "kubeblocks.selectorLabels" . | nindent 6 }} + {{- if .Values.updateStrategy }} + strategy: + {{ toYaml .Values.updateStrategy | nindent 4 | trim }} + {{- end }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "kubeblocks.selectorLabels" . | nindent 8 }} + spec: + priorityClassName: {{ template "kubeblocks.priorityClassName" . }} + {{- with .Values.image.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kubeblocks.serviceAccountName" . 
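# The second Deployment reuses the same image, service account, probes, and
# manager config as the apps Deployment; only the args (--dataprotection=true,
# every other group off), the leader-elect ID, and the
# app.kubernetes.io/component: "dataprotection" label differ. To inspect the
# rendered pair locally, something along these lines should work:
#
#   helm template kubeblocks ./deploy/helm \
#     --set separatedDeployment=true --set dataProtection.enabled=true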
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + initContainers: # only download tools image to local + - name: tools + image: "{{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.tools.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /bin/true + containers: + - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=:8080" + - "--leader-elect" + - "--leader-elect-id=abd03fda" + - "--zap-devel={{- default "false" .Values.loggerSettings.developmentMode }}" + - "--zap-time-encoding={{- default "iso8601" .Values.loggerSettings.timeEncoding }}" + {{- with .Values.loggerSettings.level }} + - "--zap-log-level={{ . }}" + {{- end }} + {{- with .Values.loggerSettings.encoder }} + - "--zap-encoder={{ . }}" + {{- end }} + - "--dataprotection=true" + - "--apps=false" + - "--extensions=false" + - "--workloads=false" + - "--storage=false" + env: + - name: CM_NAMESPACE + value: {{ .Release.Namespace }} + {{- with .Values.affinity }} + - name: CM_AFFINITY + value: {{ toJson . | quote }} + {{- end }} + {{- with .Values.nodeSelector }} + - name: CM_NODE_SELECTOR + value: {{ toJson . | quote }} + {{- end }} + {{- with .Values.tolerations }} + - name: CM_TOLERATIONS + value: {{ toJson . | quote }} + {{- end }} + - name: KUBEBLOCKS_IMAGE_PULL_POLICY + value: {{ .Values.image.pullPolicy }} + - name: KUBEBLOCKS_TOOLS_IMAGE + value: "{{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.tools.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + - name: KUBEBLOCKS_SERVICEACCOUNT_NAME + value: {{ include "kubeblocks.serviceAccountName" . }} + {{- if or .Values.dataProtection.enableVolumeSnapshot (index .Values "snapshot-controller" "enabled") }} + - name: VOLUMESNAPSHOT + value: "true" + {{- end }} + {{- if .Capabilities.APIVersions.Has "snapshot.storage.k8s.io/v1beta1" }} + - name: VOLUMESNAPSHOT_API_BETA + value: "true" + {{- end }} + {{- if .Values.admissionWebhooks.enabled }} + - name: ENABLE_WEBHOOKS + value: "true" + {{- end }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: "{{ .Values.image.registry | default "docker.io" }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: webhook-server + containerPort: 9443 + protocol: TCP + - name: health + containerPort: 8081 + protocol: TCP + - name: metrics + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: health + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: health + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - mountPath: /etc/kubeblocks + name: manager-config + {{- if .Values.admissionWebhooks.enabled }} + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + {{- end }} + {{- if .Values.hostNetwork }} + hostNetwork: {{ .Values.hostNetwork }} + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- with .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
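# Scheduling inputs (affinity, nodeSelector, tolerations, topology spread
# constraints) and replicaCount are shared with the apps Deployment; the chart
# adds no per-instance overrides for the dataprotection Deployment.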
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: 10 + volumes: + - name: manager-config + configMap: + name: {{ include "kubeblocks.fullname" . }}-manager-config + {{- if .Values.admissionWebhooks.enabled }} + - name: cert + secret: + defaultMode: 420 + secretName: {{ include "kubeblocks.fullname" . }}.{{ .Release.Namespace }}.svc.tls-pair + {{- end }} +{{- end }} \ No newline at end of file diff --git a/deploy/helm/values.yaml b/deploy/helm/values.yaml index 244c6377c4e..1fa7f4f01bc 100644 --- a/deploy/helm/values.yaml +++ b/deploy/helm/values.yaml @@ -21,6 +21,11 @@ image: tools: repository: apecloud/kubeblocks-tools +## @param separatedDeployment separated the KubeBlocks deployment into two deployments. +## This switch is used to isolate the apps controller and dataprotection controller +## +separatedDeployment: true + ## @param replicaCount ## replicaCount: 1 @@ -268,6 +273,7 @@ admissionWebhooks: ## Data protection settings ## +## @param dataProtection.enabled - set the dataProtection controllers for backup functions ## @param dataProtection.enableVolumeSnapshot - set this to true if cluster does have snapshot.storage.k8s.io API installed ## @param dataProtection.backupPVCName - set the default pvc to store the file for backup ## @param dataProtection.backupPVCInitCapacity - set the default pvc initCapacity if the pvc need to be created by backup controller @@ -276,6 +282,7 @@ admissionWebhooks: ## @param dataProtection.backupPVConfigMapName - set the default configmap name which contains key "persistentVolume" and value of the persistentVolume struct. ## @param dataProtection.backupPVConfigMapNamespace - set the default configmap namespace of pv template. dataProtection: + enabled: true enableVolumeSnapshot: false backupPVCName: kb-backup-data backupPVCInitCapacity: "" diff --git a/internal/cli/cmd/cluster/dataprotection.go b/internal/cli/cmd/cluster/dataprotection.go index 8e00dbbbb4f..944640f3234 100644 --- a/internal/cli/cmd/cluster/dataprotection.go +++ b/internal/cli/cmd/cluster/dataprotection.go @@ -342,7 +342,6 @@ func NewDeleteBackupCmd(f cmdutil.Factory, streams genericclioptions.IOStreams) }, } cmd.Flags().StringSliceVar(&o.Names, "name", []string{}, "Backup names") - _ = cmd.MarkFlagRequired("name") o.AddFlags(cmd) return cmd } diff --git a/internal/cli/cmd/kubeblocks/compare_test.go b/internal/cli/cmd/kubeblocks/compare_test.go index 43cb4ad87c5..faed9fbc571 100644 --- a/internal/cli/cmd/kubeblocks/compare_test.go +++ b/internal/cli/cmd/kubeblocks/compare_test.go @@ -20,14 +20,15 @@ along with this program. If not, see . package kubeblocks import ( - "github.com/apecloud/kubeblocks/internal/cli/testing" - "github.com/apecloud/kubeblocks/internal/cli/util/helm" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/spf13/cobra" "k8s.io/cli-runtime/pkg/genericclioptions" clientfake "k8s.io/client-go/rest/fake" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + + "github.com/apecloud/kubeblocks/internal/cli/testing" + "github.com/apecloud/kubeblocks/internal/cli/util/helm" ) var _ = Describe("kubeblocks compare", func() { diff --git a/internal/cli/cmd/kubeblocks/config.go b/internal/cli/cmd/kubeblocks/config.go index aeedc2dea74..a19286d0551 100644 --- a/internal/cli/cmd/kubeblocks/config.go +++ b/internal/cli/cmd/kubeblocks/config.go @@ -25,20 +25,16 @@ import ( "time" "github.com/spf13/cobra" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/kubernetes" cmdutil "k8s.io/kubectl/pkg/cmd/util" - deploymentutil "k8s.io/kubectl/pkg/util/deployment" "k8s.io/kubectl/pkg/util/templates" "github.com/apecloud/kubeblocks/internal/cli/printer" "github.com/apecloud/kubeblocks/internal/cli/types" "github.com/apecloud/kubeblocks/internal/cli/util" "github.com/apecloud/kubeblocks/internal/cli/util/helm" - "github.com/apecloud/kubeblocks/internal/constant" ) var showAllConfig = false @@ -241,26 +237,7 @@ func markKubeBlocksPodsToLoadConfigMap(client kubernetes.Interface) error { if err != nil { return err } - if len(pods.Items) == 0 { - return nil - } - condition := deploymentutil.GetDeploymentCondition(deploy.Status, appsv1.DeploymentProgressing) - if condition == nil { - return nil - } - podBelongToKubeBlocks := func(pod corev1.Pod) bool { - for _, v := range pod.OwnerReferences { - if v.Kind == constant.ReplicaSetKind && strings.Contains(condition.Message, v.Name) { - return true - } - } - return false - } for _, pod := range pods.Items { - belongToKubeBlocks := podBelongToKubeBlocks(pod) - if !belongToKubeBlocks { - continue - } // mark the pod to load configmap if pod.Annotations == nil { pod.Annotations = map[string]string{} diff --git a/internal/cli/util/version.go b/internal/cli/util/version.go index de9666d694e..9f2cd2ed4d8 100644 --- a/internal/cli/util/version.go +++ b/internal/cli/util/version.go @@ -113,6 +113,12 @@ func GetKubeBlocksDeploy(client kubernetes.Interface) (*appsv1.Deployment, error return nil, nil } if len(deploys.Items) > 1 { + // for compatibility with older versions, filter here instead of LabelSelector + for _, i := range deploys.Items { + if _, ok := i.Labels["app.kubernetes.io/component"]; ok { + return &i, nil + } + } return nil, fmt.Errorf("found multiple KubeBlocks deployments, please check your cluster") } return &deploys.Items[0], nil diff --git a/internal/controller/builder/builder.go b/internal/controller/builder/builder.go index 400b904a5e4..cb5012b5f8a 100644 --- a/internal/controller/builder/builder.go +++ b/internal/controller/builder/builder.go @@ -568,20 +568,6 @@ func BuildBackup(cluster *appsv1alpha1.Cluster, return &backup, nil } -func BuildVolumeSnapshot(snapshotKey types.NamespacedName, - pvcName string, - sts *appsv1.StatefulSet) (*snapshotv1.VolumeSnapshot, error) { - snapshot := snapshotv1.VolumeSnapshot{} - if err := buildFromCUE("snapshot_template.cue", map[string]any{ - "snapshot_key": snapshotKey, - "pvc_name": pvcName, - "sts": sts, - }, "snapshot", &snapshot); err != nil { - return nil, err - } - return &snapshot, nil -} - func BuildConfigMapWithTemplate(cluster *appsv1alpha1.Cluster, component *component.SynthesizedComponent, configs map[string]string, diff --git a/internal/controller/builder/builder_test.go 
b/internal/controller/builder/builder_test.go index ed4d6d7fee5..0e8eee8cbac 100644 --- a/internal/controller/builder/builder_test.go +++ b/internal/controller/builder/builder_test.go @@ -386,18 +386,6 @@ var _ = Describe("builder", func() { Expect(backupJob).ShouldNot(BeNil()) }) - It("builds VolumeSnapshot correctly", func() { - sts := newStsObj() - snapshotKey := types.NamespacedName{ - Namespace: "default", - Name: "test-snapshot", - } - pvcName := "test-pvc-name" - vs, err := BuildVolumeSnapshot(snapshotKey, pvcName, sts) - Expect(err).Should(BeNil()) - Expect(vs).ShouldNot(BeNil()) - }) - It("builds ConfigMap with template correctly", func() { config := map[string]string{} _, cluster, synthesizedComponent := newClusterObjs(nil) diff --git a/internal/controller/builder/cue/snapshot_template.cue b/internal/controller/builder/cue/snapshot_template.cue deleted file mode 100644 index 270a01bf91e..00000000000 --- a/internal/controller/builder/cue/snapshot_template.cue +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2022-2023 ApeCloud Co., Ltd -// -// This file is part of KubeBlocks project -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -snapshot_key: { - Name: string - Namespace: string -} -pvc_name: string -sts: { - metadata: { - labels: [string]: string - namespace: string - } -} - -snapshot: { - apiVersion: "snapshot.storage.k8s.io/v1" - kind: "VolumeSnapshot" - metadata: { - name: snapshot_key.Name - namespace: snapshot_key.Namespace - labels: { - "apps.kubeblocks.io/managed-by": "cluster" - for k, v in sts.metadata.labels { - "\(k)": "\(v)" - } - } - } - spec: { - source: { - persistentVolumeClaimName: pvc_name - } - } -}
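With BuildVolumeSnapshot and its CUE template removed, the snapshot clone path requests snapshots exclusively through the dataprotection API. A minimal sketch of the replacement flow, assuming the Backup types from apis/dataprotection/v1alpha1 (field and constant names should be checked against that package; backupPolicyName is a placeholder):

	// Request a snapshot-type backup; the dataprotection controller creates the
	// VolumeSnapshot and labels it with constant.DataProtectionLabelBackupNameKey,
	// which is the key the new listVolumeSnapshotByLabels lookup relies on.
	backup := &dataprotectionv1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      key.Name,
			Namespace: key.Namespace,
			Labels:    d.getBackupMatchingLabels(), // component labels used for the first-stage lookup
		},
		Spec: dataprotectionv1alpha1.BackupSpec{
			BackupPolicyName: backupPolicyName,
			BackupType:       dataprotectionv1alpha1.BackupTypeSnapshot,
		},
	}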