Skip to content

Commit

Permalink
Merge branch 'main' into support/lifecycle-crds-version-v1
Browse files Browse the repository at this point in the history
  • Loading branch information
leon-inf committed Sep 10, 2024
2 parents 4f40b46 + 448ac3c commit 200e566
Show file tree
Hide file tree
Showing 66 changed files with 680 additions and 686 deletions.
52 changes: 51 additions & 1 deletion .github/CODEOWNERS
Validating CODEOWNERS rules …
Original file line number Diff line number Diff line change
@@ -1 +1,51 @@
* @nayutah @ldming @free6om @wangyelei @Y-Rookie @weicao @shanshanying @leon-inf @zjx20
* @apecloud/kb-reviewers

# Cluster APIs
apis/**/cluster*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
apis/**/component*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
apis/**/service*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
controllers/apps/cluster*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
controllers/apps/transformer_cluster*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
controllers/apps/component*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
controllers/apps/transformer_component*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers
controllers/apps/service*.go @leon-inf @Y-Rookie @apecloud/kb-reviewers

# OpsRequest API
apis/**/opsrequest*.go @wangyelei @apecloud/kb-reviewers
apis/**/opsdefinition*.go @wangyelei @apecloud/kb-reviewers
controllers/apps/opsdefinition*.go @wangyelei @apecloud/kb-reviewers
controllers/apps/operations/ @wangyelei @apecloud/kb-reviewers

# Config API
apis/**/config*.go @sophon-zt @apecloud/kb-reviewers
controllers/apps/configuration/ @sophon-zt @apecloud/kb-reviewers
controllers/apps/operations/reconfigure*.go @sophon-zt @apecloud/kb-reviewers

# DataProtection APIs
apis/dataprotection/ @ldming @wangyelei @zjx20 @apecloud/kb-reviewers
controllers/dataprotection/ @ldming @wangyelei @zjx20 @apecloud/kb-reviewers
pkg/dataprotection/ @ldming @wangyelei @zjx20 @apecloud/kb-reviewers

# Experimental APIs
apis/experimental/ @free6om @apecloud/kb-reviewers
controllers/experimental/ @free6om @apecloud/kb-reviewers

# Extensions APIs
apis/extensions/ @ldming @apecloud/kb-reviewers
controllers/extensions/ @ldming @apecloud/kb-reviewers

# Workloads APIs
apis/workloads/ @free6om @apecloud/kb-reviewers
controllers/workloads/ @free6om @apecloud/kb-reviewers

# Docs
docs/ @michelle-0808 @shanshanying @apecloud/kb-reviewers
i18n/ @michelle-0808 @shanshanying @apecloud/kb-reviewers
docs/developer_docs/api-reference/

# Examples
examples/ @ahjing99 @shanshanying @apecloud/kb-reviewers

# Tests
.github/ @JashBook @ahjing99 @apecloud/kb-reviewers
test/e2e/ @JashBook @ahjing99 @linghan-hub @apecloud/kb-reviewers
1 change: 1 addition & 0 deletions apis/apps/v1alpha1/cluster_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,7 @@ type ClusterSpec struct {
// - `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
// - `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
// allowing for data preservation while stopping other operations.
// Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
// - `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while
// removing all persistent data.
// - `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and
Expand Down
1 change: 1 addition & 0 deletions config/crd/bases/apps.kubeblocks.io_clusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33647,6 +33647,7 @@ spec:
- `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
- `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
allowing for data preservation while stopping other operations.
Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
- `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while
removing all persistent data.
- `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and
Expand Down
34 changes: 10 additions & 24 deletions controllers/apps/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -791,37 +791,23 @@ var _ = Describe("Cluster Controller", func() {
Context: testCtx.Ctx,
Client: testCtx.Cli,
}
preserveKinds := haltPreserveKinds()
preserveObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), preserveKinds)
namespacedKinds, clusteredKinds := kindsForWipeOut()
allKinds := append(namespacedKinds, clusteredKinds...)
createdObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), allKinds)
Expect(err).Should(Succeed())
for _, obj := range preserveObjs {
// Expect(obj.GetFinalizers()).Should(ContainElements(constant.DBClusterFinalizerName))
Expect(obj.GetAnnotations()).ShouldNot(HaveKey(constant.LastAppliedClusterAnnotationKey))
}

By("delete the cluster")
testapps.DeleteObject(&testCtx, clusterKey, &appsv1.Cluster{})
Consistently(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1.Cluster{}, true)).Should(Succeed())

By("wait for the cluster to terminate")
Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &appsv1.Cluster{}, false)).Should(Succeed())

By("check expected preserved objects")
keptObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), preserveKinds)
By("check all cluster resources again")
objs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), allKinds)
Expect(err).Should(Succeed())
for key, obj := range preserveObjs {
Expect(keptObjs).Should(HaveKey(key))
keptObj := keptObjs[key]
Expect(obj.GetUID()).Should(BeEquivalentTo(keptObj.GetUID()))
Expect(keptObj.GetFinalizers()).ShouldNot(ContainElements(constant.DBClusterFinalizerName))
Expect(keptObj.GetAnnotations()).Should(HaveKey(constant.LastAppliedClusterAnnotationKey))
// check all objects existed before cluster deletion still be there
for key, obj := range createdObjs {
Expect(objs).Should(HaveKey(key))
Expect(obj.GetUID()).Should(BeEquivalentTo(objs[key].GetUID()))
}

By("check all other resources deleted")
namespacedKinds, clusteredKinds := kindsForHalt()
kindsToDelete := append(namespacedKinds, clusteredKinds...)
otherObjs, err := getOwningNamespacedObjects(transCtx.Context, transCtx.Client, clusterObj.Namespace, getAppInstanceML(*clusterObj), kindsToDelete)
Expect(err).Should(Succeed())
Expect(otherObjs).Should(HaveLen(0))
}

testClusterHaltNRecovery := func(createObj func(appsv1.TerminationPolicyType)) {
Expand Down
4 changes: 3 additions & 1 deletion controllers/apps/transformer_cluster_deletion.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,9 @@ func (t *clusterDeletionTransformer) Transform(ctx graph.TransformContext, dag *
"spec.terminationPolicy %s is preventing deletion.", cluster.Spec.TerminationPolicy)
return graph.ErrPrematureStop
case kbappsv1.Halt:
toDeleteNamespacedKinds, toDeleteNonNamespacedKinds = kindsForHalt()
transCtx.EventRecorder.Eventf(cluster, corev1.EventTypeWarning, "Halt",
"spec.terminationPolicy %s is preventing deletion. Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.", cluster.Spec.TerminationPolicy)
return graph.ErrPrematureStop
case kbappsv1.Delete:
toDeleteNamespacedKinds, toDeleteNonNamespacedKinds = kindsForDelete()
case kbappsv1.WipeOut:
Expand Down
20 changes: 1 addition & 19 deletions controllers/apps/transformer_component_deletion.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
package apps

import (
"context"
"fmt"
"time"

Expand Down Expand Up @@ -91,25 +90,14 @@ func (t *componentDeletionTransformer) handleCompDeleteWhenScaleIn(transCtx *com
// handleCompDeleteWhenClusterDelete handles the component deletion when the cluster is being deleted, the sub-resources owned by the component depends on the cluster's TerminationPolicy.
func (t *componentDeletionTransformer) handleCompDeleteWhenClusterDelete(transCtx *componentTransformContext, graphCli model.GraphClient,
dag *graph.DAG, cluster *appsv1.Cluster, comp *appsv1.Component, matchLabels map[string]string) error {
var (
toPreserveKinds, toDeleteKinds []client.ObjectList
)
var toDeleteKinds []client.ObjectList
switch cluster.Spec.TerminationPolicy {
case appsv1.Halt:
toPreserveKinds = compOwnedPreserveKinds()
toDeleteKinds = kindsForCompHalt()
case appsv1.Delete:
toDeleteKinds = kindsForCompDelete()
case appsv1.WipeOut:
toDeleteKinds = kindsForCompWipeOut()
}

if len(toPreserveKinds) > 0 {
// preserve the objects owned by the component when the component is being deleted
if err := preserveCompObjects(transCtx.Context, transCtx.Client, graphCli, dag, comp, matchLabels, toPreserveKinds); err != nil {
return newRequeueError(requeueDuration, err.Error())
}
}
return t.deleteCompResources(transCtx, graphCli, dag, comp, matchLabels, toDeleteKinds)
}

Expand Down Expand Up @@ -219,9 +207,3 @@ func kindsForCompDelete() []client.ObjectList {
// kindsForCompWipeOut returns the kinds of component-owned sub-resources to
// delete when the cluster's TerminationPolicy is WipeOut. It is currently
// identical to the Delete policy's set (see kindsForCompDelete).
func kindsForCompWipeOut() []client.ObjectList {
	return kindsForCompDelete()
}

// preserveCompObjects preserves the objects owned by the component when the component is being deleted
func preserveCompObjects(ctx context.Context, cli client.Reader, graphCli model.GraphClient, dag *graph.DAG,
comp *appsv1.Component, ml client.MatchingLabels, toPreserveKinds []client.ObjectList) error {
return preserveObjects(ctx, cli, graphCli, dag, comp, ml, toPreserveKinds, constant.DBComponentFinalizerName, constant.LastAppliedClusterAnnotationKey)
}
1 change: 1 addition & 0 deletions deploy/helm/crds/apps.kubeblocks.io_clusters.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33647,6 +33647,7 @@ spec:
- `DoNotTerminate`: Prevents deletion of the Cluster. This policy ensures that all resources remain intact.
- `Halt`: Deletes Cluster resources like Pods and Services but retains Persistent Volume Claims (PVCs),
allowing for data preservation while stopping other operations.
Warning: Halt policy is deprecated in 0.9.1 and will have the same meaning as DoNotTerminate.
- `Delete`: Extends the `Halt` policy by also removing PVCs, leading to a thorough cleanup while
removing all persistent data.
- `WipeOut`: An aggressive policy that deletes all Cluster resources, including volume snapshots and
Expand Down
4 changes: 0 additions & 4 deletions docs/api_docs/connect_database/_category_.yml

This file was deleted.

This file was deleted.

Loading

0 comments on commit 200e566

Please sign in to comment.