diff --git a/apis/apps/v1alpha1/cluster_types.go b/apis/apps/v1alpha1/cluster_types.go index 9687325bfdb..f6b398e91e9 100644 --- a/apis/apps/v1alpha1/cluster_types.go +++ b/apis/apps/v1alpha1/cluster_types.go @@ -93,6 +93,7 @@ type ClusterSpec struct { // - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. // - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), // allowing for data preservation while stopping other operations. + // Warning: Halt policy is deprecated in 0.9.1 and will have same meaning as DoNotTerminate. // - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while // removing all persistent data. // - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index 43bf54cc27c..18f0112b209 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -16012,6 +16012,7 @@ spec: - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. + Warning: Halt policy is deprecated in 0.9.1 and will have same meaning as DoNotTerminate. - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. 
- `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index 17f44dfa0e2..e9815233ab9 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -987,37 +987,23 @@ var _ = Describe("Cluster Controller", func() { Context: testCtx.Ctx, Client: testCtx.Cli, } - preserveKinds := haltPreserveKinds() - preserveObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), preserveKinds) + namespacedKinds, clusteredKinds := kindsForWipeOut() + allKinds := append(namespacedKinds, clusteredKinds...) + createdObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), allKinds) Expect(err).Should(Succeed()) - for _, obj := range preserveObjs { - // Expect(obj.GetFinalizers()).Should(ContainElements(constant.DBClusterFinalizerName)) - Expect(obj.GetAnnotations()).ShouldNot(HaveKey(constant.LastAppliedClusterAnnotationKey)) - } By("delete the cluster") testapps.DeleteObject(&testCtx, clusterKey, &appsv1alpha1.Cluster{}) + Consistently(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, true)).Should(Succeed()) - By("wait for the cluster to terminate") - Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1alpha1.Cluster{}, false)).Should(Succeed()) - - By("check expected preserved objects") - keptObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), preserveKinds) + By("check all cluster resources again") + objs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), allKinds) Expect(err).Should(Succeed()) - for key, obj := range preserveObjs { - 
Expect(keptObjs).Should(HaveKey(key)) - keptObj := keptObjs[key] - Expect(obj.GetUID()).Should(BeEquivalentTo(keptObj.GetUID())) - Expect(keptObj.GetFinalizers()).ShouldNot(ContainElements(constant.DBClusterFinalizerName)) - Expect(keptObj.GetAnnotations()).Should(HaveKey(constant.LastAppliedClusterAnnotationKey)) + // check all objects existed before cluster deletion still be there + for key, obj := range createdObjs { + Expect(objs).Should(HaveKey(key)) + Expect(obj.GetUID()).Should(BeEquivalentTo(objs[key].GetUID())) } - - By("check all other resources deleted") - namespacedKinds, clusteredKinds := kindsForHalt() - kindsToDelete := append(namespacedKinds, clusteredKinds...) - otherObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), kindsToDelete) - Expect(err).Should(Succeed()) - Expect(otherObjs).Should(HaveLen(0)) } testClusterHaltNRecovery := func(createObj func(appsv1alpha1.TerminationPolicyType)) { diff --git a/controllers/apps/transformer_cluster_deletion.go b/controllers/apps/transformer_cluster_deletion.go index a3b7963815d..551a4fc9ef3 100644 --- a/controllers/apps/transformer_cluster_deletion.go +++ b/controllers/apps/transformer_cluster_deletion.go @@ -66,7 +66,9 @@ func (t *clusterDeletionTransformer) Transform(ctx graph.TransformContext, dag * "spec.terminationPolicy %s is preventing deletion.", cluster.Spec.TerminationPolicy) return graph.ErrPrematureStop case appsv1alpha1.Halt: - toDeleteNamespacedKinds, toDeleteNonNamespacedKinds = kindsForHalt() + transCtx.EventRecorder.Eventf(cluster, corev1.EventTypeWarning, "Halt", + "spec.terminationPolicy %s is preventing deletion. 
Halt policy is deprecated in 0.9.1 and will have same meaning as DoNotTerminate.", cluster.Spec.TerminationPolicy) + return graph.ErrPrematureStop case appsv1alpha1.Delete: toDeleteNamespacedKinds, toDeleteNonNamespacedKinds = kindsForDelete() case appsv1alpha1.WipeOut: diff --git a/controllers/apps/transformer_component_deletion.go b/controllers/apps/transformer_component_deletion.go index add1b7a8614..075dcd7f764 100644 --- a/controllers/apps/transformer_component_deletion.go +++ b/controllers/apps/transformer_component_deletion.go @@ -20,7 +20,6 @@ along with this program. If not, see . package apps import ( - "context" "fmt" "time" @@ -108,25 +107,14 @@ func (t *componentDeletionTransformer) handleCompDeleteWhenScaleIn(transCtx *com // handleCompDeleteWhenClusterDelete handles the component deletion when the cluster is being deleted, the sub-resources owned by the component depends on the cluster's TerminationPolicy. func (t *componentDeletionTransformer) handleCompDeleteWhenClusterDelete(transCtx *componentTransformContext, graphCli model.GraphClient, dag *graph.DAG, cluster *appsv1alpha1.Cluster, comp *appsv1alpha1.Component, matchLabels map[string]string) error { - var ( - toPreserveKinds, toDeleteKinds []client.ObjectList - ) + var toDeleteKinds []client.ObjectList switch cluster.Spec.TerminationPolicy { - case appsv1alpha1.Halt: - toPreserveKinds = compOwnedPreserveKinds() - toDeleteKinds = kindsForCompHalt() case appsv1alpha1.Delete: toDeleteKinds = kindsForCompDelete() case appsv1alpha1.WipeOut: toDeleteKinds = kindsForCompWipeOut() } - if len(toPreserveKinds) > 0 { - // preserve the objects owned by the component when the component is being deleted - if err := preserveCompObjects(transCtx.Context, transCtx.Client, graphCli, dag, comp, matchLabels, toPreserveKinds); err != nil { - return newRequeueError(requeueDuration, err.Error()) - } - } return t.deleteCompResources(transCtx, graphCli, dag, comp, matchLabels, toDeleteKinds) } @@ -215,9 +203,3 @@ func 
kindsForCompDelete() []client.ObjectList { func kindsForCompWipeOut() []client.ObjectList { return kindsForCompDelete() } - -// preserveCompObjects preserves the objects owned by the component when the component is being deleted -func preserveCompObjects(ctx context.Context, cli client.Reader, graphCli model.GraphClient, dag *graph.DAG, - comp *appsv1alpha1.Component, ml client.MatchingLabels, toPreserveKinds []client.ObjectList) error { - return preserveObjects(ctx, cli, graphCli, dag, comp, ml, toPreserveKinds, constant.DBComponentFinalizerName, constant.LastAppliedClusterAnnotationKey) -} diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index 43bf54cc27c..18f0112b209 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -16012,6 +16012,7 @@ spec: - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact. - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs), allowing for data preservation while stopping other operations. + Warning: Halt policy is deprecated in 0.9.1 and will have same meaning as DoNotTerminate. - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while removing all persistent data. - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md index b24d54fdb33..7dd2a57e6ff 100644 --- a/docs/developer_docs/api-reference/cluster.md +++ b/docs/developer_docs/api-reference/cluster.md @@ -199,7 +199,8 @@ Choose a policy based on the desired level of resource cleanup and data preserva