From 896efddb4833ddcd825d7a33f7ccb3407c6127f9 Mon Sep 17 00:00:00 2001 From: jacklu Date: Tue, 30 Apr 2024 17:45:05 +0800 Subject: [PATCH 01/32] add subscription workload Signed-off-by: jacklu --- e2e/deployers/crud.go | 210 +++++++++++++++++++++++++++++++-- e2e/deployers/subscription.go | 40 +++---- e2e/dractions/actions.go | 216 ++++++++++++++++++++++++++++++++-- e2e/dractions/crud.go | 141 ++++++++++++++++++++++ e2e/dractions/retry.go | 175 +++++++++++++++++++++++++++ e2e/exhaustive_suite_test.go | 3 +- e2e/go.mod | 2 +- e2e/util/const.go | 2 + e2e/util/context.go | 8 +- e2e/workloads/workload.go | 6 +- 10 files changed, 759 insertions(+), 44 deletions(-) create mode 100644 e2e/dractions/crud.go create mode 100644 e2e/dractions/retry.go diff --git a/e2e/deployers/crud.go b/e2e/deployers/crud.go index 03aa238dd..e347a2ad1 100644 --- a/e2e/deployers/crud.go +++ b/e2e/deployers/crud.go @@ -3,34 +3,228 @@ package deployers -func createNamespace() error { +import ( + "context" + "strings" + + "github.com/ramendr/ramen/e2e/util" + "github.com/ramendr/ramen/e2e/workloads" + "k8s.io/apimachinery/pkg/api/errors" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ocmv1b1 "open-cluster-management.io/api/cluster/v1beta1" + ocmv1b2 "open-cluster-management.io/api/cluster/v1beta2" + placementrulev1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" + subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" +) + +const ( + AppLabelKey = "app" + ClusterSetName = "default" +) + +func createNamespace(namespace string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), ns) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("namespace " + namespace + " already Exists") + } + return nil } -func createManagedClusterSetBinding() error { +func deleteNamespace(namespace string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), ns) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("namespace " + namespace + " not found") + } + return nil } -func createPlacement() error { +func createManagedClusterSetBinding(name, namespace string) error { + labels := make(map[string]string) + labels[AppLabelKey] = namespace + mcsb := &ocmv1b2.ManagedClusterSetBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: ocmv1b2.ManagedClusterSetBindingSpec{ + ClusterSet: ClusterSetName, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), mcsb) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("managedClusterSetBinding " + mcsb.Name + " already Exists") + } + return nil } -func createSubscription() error { +func deleteManagedClusterSetBinding(name, namespace string) error { + mcsb := &ocmv1b2.ManagedClusterSetBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), mcsb) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("managedClusterSetBinding " + name + " not found") + } + return nil } -func deleteNamespace() error { +func createPlacement(name, namespace string) error { + labels := 
make(map[string]string) + labels[AppLabelKey] = name + clusterSet := []string{"default"} + + var numClusters int32 = 1 + placement := &ocmv1b1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: ocmv1b1.PlacementSpec{ + ClusterSets: clusterSet, + NumberOfClusters: &numClusters, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), placement) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("placement " + placement.Name + " already Exists") + } + return nil } -func deleteManagedClusterSetBinding() error { +func deletePlacement(name, namespace string) error { + placement := &ocmv1b1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), placement) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("placement " + name + " not found") + } + return nil } -func deletePlacement() error { +func createSubscription(s Subscription, w workloads.Workload) error { + name := GetCombinedName(s, w) + namespace := name + + labels := make(map[string]string) + labels[AppLabelKey] = name + + annotations := make(map[string]string) + annotations["apps.open-cluster-management.io/github-branch"] = w.GetRevision() + annotations["apps.open-cluster-management.io/github-path"] = w.GetPath() + + placementRef := corev1.ObjectReference{ + Kind: "Placement", + Name: name, + } + + placementRulePlacement := &placementrulev1.Placement{} + placementRulePlacement.PlacementRef = &placementRef + + subscription := &subscriptionv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: subscriptionv1.SubscriptionSpec{ + Channel: util.ChannelNamespace + "/" + util.ChannelName, + Placement: placementRulePlacement, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), subscription) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("placement " + subscription.Name + " already Exists") + } + return nil } -func deleteSubscription() error { +func deleteSubscription(s Subscription, w workloads.Workload) error { + name := GetCombinedName(s, w) + namespace := name + + subscription := &subscriptionv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), subscription) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("subscription " + name + " not found") + } + return nil } + +func GetCombinedName(d Deployer, w workloads.Workload) string { + return strings.ToLower(d.GetName() + "-" + w.GetAppName()) +} diff --git a/e2e/deployers/subscription.go b/e2e/deployers/subscription.go index f8e27b8fb..682386955 100644 --- a/e2e/deployers/subscription.go +++ b/e2e/deployers/subscription.go @@ -8,18 +8,12 @@ import ( "github.com/ramendr/ramen/e2e/workloads" ) -type Subscription struct { - // NamePrefix helps when the same workload needs to run in parallel with different deployers. - // In the future we potentially would add a resource suffix that is either randomly generated - // or a hash of the full test name, to handle cases where we want to run the "same" combination - // of deployer+workload for various reasons. 
- NamePrefix string - McsbName string -} +// mcsb name must be same as the target ManagedClusterSet +const McsbName = ClusterSetName + +type Subscription struct{} -func (s *Subscription) Init() { - s.NamePrefix = "sub-" - s.McsbName = "default" +func (s Subscription) Init() { } func (s Subscription) GetName() string { @@ -33,25 +27,28 @@ func (s Subscription) Deploy(w workloads.Workload) error { // Generate a Subscription for the Workload // - Kustomize the Workload; call Workload.Kustomize(StorageType) // Address namespace/label/suffix as needed for various resources - util.Ctx.Log.Info("enter Deploy " + w.GetName() + "/Subscription") + util.Ctx.Log.Info("enter Deploy " + w.GetName() + "/" + s.GetName()) + + name := GetCombinedName(s, w) + namespace := name // w.Kustomize() - err := createNamespace() + err := createNamespace(namespace) if err != nil { return err } - err = createManagedClusterSetBinding() + err = createManagedClusterSetBinding(McsbName, namespace) if err != nil { return err } - err = createPlacement() + err = createPlacement(name, namespace) if err != nil { return err } - err = createSubscription() + err = createSubscription(s, w) if err != nil { return err } @@ -63,22 +60,25 @@ func (s Subscription) Undeploy(w workloads.Workload) error { // Delete Subscription, Placement, Binding util.Ctx.Log.Info("enter Undeploy " + w.GetName() + "/Subscription") - err := deleteSubscription() + name := GetCombinedName(s, w) + namespace := name + + err := deleteSubscription(s, w) if err != nil { return err } - err = deletePlacement() + err = deletePlacement(name, namespace) if err != nil { return err } - err = deleteManagedClusterSetBinding() + err = deleteManagedClusterSetBinding(McsbName, namespace) if err != nil { return err } - err = deleteNamespace() + err = deleteNamespace(namespace) if err != nil { return err } diff --git a/e2e/dractions/actions.go b/e2e/dractions/actions.go index 73ae41932..ea06b8a7b 100644 --- a/e2e/dractions/actions.go +++ b/e2e/dractions/actions.go @@ -4,6 +4,7 @@ package dractions import ( + "fmt" "time" "github.com/ramendr/ramen/e2e/deployers" @@ -17,26 +18,227 @@ const ( FiveSecondsDuration = 5 * time.Second ) +// If AppSet/Subscription, find Placement +// Determine DRPolicy +// Determine preferredCluster +// Determine PVC label selector +// Determine KubeObjectProtection requirements if Imperative (?) 
+// Create DRPC, in desired namespace func EnableProtection(w workloads.Workload, d deployers.Deployer) error { util.Ctx.Log.Info("enter EnableProtection " + w.GetName() + "/" + d.GetName()) - return nil + _, isSub := d.(*deployers.Subscription) + _, isAppSet := d.(*deployers.ApplicationSet) + + if !isSub && !isAppSet { + return fmt.Errorf("deployer unknown") + } + + name := GetCombinedName(d, w) + namespace := name + + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + + drPolicyName := DefaultDRPolicyName + appname := w.GetAppName() + + placementName := name + drpcName := name + + placement, placementDecisionName, err := waitPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get placementdecision " + placementDecisionName) + + placementDecision, err := getPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementDecisionName) + if err != nil { + return err + } + + clusterName := placementDecision.Status.Decisions[0].ClusterName + util.Ctx.Log.Info("placementdecision clusterName: " + clusterName) + + // move update placement annotation after placement has been handled + // otherwise if we first add ocm disable annotation then it might not + // yet be handled by ocm and thus PlacementSatisfied=false + if placement.Annotations == nil { + placement.Annotations = make(map[string]string) + } + + placement.Annotations[OcmSchedulingDisable] = "true" + + util.Ctx.Log.Info("update placement " + placementName + " annotation") + + if err = updatePlacement(util.Ctx.Hub.CtrlClient, placement); err != nil { + return err + } + + util.Ctx.Log.Info("create drpc " + drpcName) + + drpc := generateDRPC(name, namespace, clusterName, drPolicyName, placementName, appname) + if err = createDRPC(util.Ctx.Hub.CtrlClient, drpc); err != nil { + return err + } + + return waitDRPCReady(util.Ctx.Hub.CtrlClient, namespace, drpcName) } +// remove DRPC +// update placement annotation func DisableProtection(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter DisableProtection " + w.GetName() + "/" + d.GetName()) + util.Ctx.Log.Info("enter DRActions DisableProtection") + + _, isSub := d.(*deployers.Subscription) + _, isAppSet := d.(*deployers.ApplicationSet) + + if !isSub && !isAppSet { + return fmt.Errorf("deployers.Deployer not known") + } + + name := GetCombinedName(d, w) + namespace := name + placementName := name + drpcName := name + client := util.Ctx.Hub.CtrlClient + + // if isAppSet { + // namespace = util.ArgocdNamespace + // } - return nil + util.Ctx.Log.Info("delete drpc " + drpcName) + + if err := deleteDRPC(client, namespace, drpcName); err != nil { + return err + } + + util.Ctx.Log.Info("get placement " + placementName) + + placement, err := getPlacement(client, namespace, placementName) + if err != nil { + return err + } + + delete(placement.Annotations, OcmSchedulingDisable) + util.Ctx.Log.Info("updated placement " + placementName + " annotation") + + return updatePlacement(client, placement) } func Failover(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter Failover " + w.GetName() + "/" + d.GetName()) + util.Ctx.Log.Info("enter DRActions Failover") + + name := GetCombinedName(d, w) + namespace := name - return nil + // _, isAppSet := d.(*deployers.ApplicationSet) + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + + drPolicyName := DefaultDRPolicyName + drpcName := name + client := util.Ctx.Hub.CtrlClient + + // here we expect drpc should be 
ready before failover + if err := waitDRPCReady(client, namespace, drpcName); err != nil { + return err + } + + util.Ctx.Log.Info("get drpc " + drpcName) + + drpc, err := getDRPC(client, namespace, drpcName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get drpolicy " + drPolicyName) + + drpolicy, err := getDRPolicy(client, drPolicyName) + if err != nil { + return err + } + + targetCluster, err := getTargetCluster(client, namespace, name, drpolicy) + if err != nil { + return err + } + + util.Ctx.Log.Info("failover to cluster: " + targetCluster) + + drpc.Spec.Action = "Failover" + drpc.Spec.FailoverCluster = targetCluster + + util.Ctx.Log.Info("update drpc " + drpcName) + + if err = updateDRPC(client, drpc); err != nil { + return err + } + + return waitDRPC(client, namespace, name, "FailedOver") } +// Determine DRPC +// Check Placement +// Relocate to Primary in DRPolicy as the PrimaryCluster +// Update DRPC func Relocate(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter Relocate " + w.GetName() + "/" + d.GetName()) + util.Ctx.Log.Info("enter DRActions Relocate") + + name := GetCombinedName(d, w) + namespace := name + + // _, isAppSet := d.(*deployers.ApplicationSet) + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + drPolicyName := DefaultDRPolicyName + drpcName := name + client := util.Ctx.Hub.CtrlClient + + // here we expect drpc should be ready before relocate + err := waitDRPCReady(client, namespace, drpcName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get drpc " + drpcName) + + drpc, err := getDRPC(client, namespace, drpcName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get drpolicy " + drPolicyName) + + drpolicy, err := getDRPolicy(client, drPolicyName) + if err != nil { + return err + } + + targetCluster, err := getTargetCluster(client, namespace, name, drpolicy) + if err != nil { + return err + } + + util.Ctx.Log.Info("relocate to cluster: " + targetCluster) + + drpc.Spec.Action = "Relocate" + drpc.Spec.PreferredCluster = targetCluster + + util.Ctx.Log.Info("update drpc " + drpcName) + + err = updateDRPC(client, drpc) + if err != nil { + return err + } + + return waitDRPC(client, namespace, name, "Relocated") +} - return nil +func GetCombinedName(d deployers.Deployer, w workloads.Workload) string { + return deployers.GetCombinedName(d, w) } diff --git a/e2e/dractions/crud.go b/e2e/dractions/crud.go new file mode 100644 index 000000000..f6ff30abe --- /dev/null +++ b/e2e/dractions/crud.go @@ -0,0 +1,141 @@ +package dractions + +import ( + "context" + + ramen "github.com/ramendr/ramen/api/v1alpha1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func getPlacement(ctrlClient client.Client, namespace, name string) (*clusterv1beta1.Placement, error) { + placement := &clusterv1beta1.Placement{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, placement) + if err != nil { + return nil, err + } + + return placement, nil +} + +func updatePlacement(ctrlClient client.Client, placement *clusterv1beta1.Placement) error { + err := ctrlClient.Update(context.Background(), placement) + if err != nil { + return err + } + + return nil +} + +func getPlacementDecision(ctrlClient client.Client, namespace, name string) 
(*clusterv1beta1.PlacementDecision, error) { + placementDecision := &clusterv1beta1.PlacementDecision{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, placementDecision) + if err != nil { + return nil, err + } + + return placementDecision, nil +} + +func getDRPC(ctrlClient client.Client, namespace, name string) (*ramen.DRPlacementControl, error) { + drpc := &ramen.DRPlacementControl{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, drpc) + if err != nil { + return nil, err + } + + return drpc, nil +} + +func createDRPC(ctrlClient client.Client, drpc *ramen.DRPlacementControl) error { + err := ctrlClient.Create(context.Background(), drpc) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + // ctx.Log.Info("drpc " + drpc.Name + " already Exists") + } + + return nil +} + +func updateDRPC(ctrlClient client.Client, drpc *ramen.DRPlacementControl) error { + err := ctrlClient.Update(context.Background(), drpc) + if err != nil { + return err + } + + return nil +} + +func deleteDRPC(ctrlClient client.Client, namespace, name string) error { + objDrpc := &ramen.DRPlacementControl{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, objDrpc) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + return nil + } + + err = ctrlClient.Delete(context.Background(), objDrpc) + if err != nil { + return err + } + + return nil +} + +func getDRPolicy(ctrlClient client.Client, name string) (*ramen.DRPolicy, error) { + drpolicy := &ramen.DRPolicy{} + key := types.NamespacedName{Name: name} + + err := ctrlClient.Get(context.Background(), key, drpolicy) + if err != nil { + return nil, err + } + + return drpolicy, nil +} + +func generateDRPC(name, namespace, clusterName, drPolicyName, placementName, appname string) *ramen.DRPlacementControl { + drpc := &ramen.DRPlacementControl{ + TypeMeta: metav1.TypeMeta{ + Kind: "DRPlacementControl", + APIVersion: "ramendr.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{"app": name}, + }, + Spec: ramen.DRPlacementControlSpec{ + PreferredCluster: clusterName, + DRPolicyRef: v1.ObjectReference{ + Name: drPolicyName, + }, + PlacementRef: v1.ObjectReference{ + Kind: "placement", + Name: placementName, + }, + PVCSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"appname": appname}, + }, + }, + } + + return drpc +} diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go new file mode 100644 index 000000000..131197ecb --- /dev/null +++ b/e2e/dractions/retry.go @@ -0,0 +1,175 @@ +package dractions + +import ( + "fmt" + "time" + + ramen "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/e2e/util" + "open-cluster-management.io/api/cluster/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// return placement object, placementDecisionName, error +func waitPlacementDecision(client client.Client, namespace string, placementName string, +) (*v1beta1.Placement, string, error) { + startTime := time.Now() + placementDecisionName := "" + + for { + placement, err := getPlacement(client, namespace, placementName) + if err != nil { + return nil, "", err + } + + for _, cond := range placement.Status.Conditions { + if cond.Type == "PlacementSatisfied" && cond.Status == "True" { + placementDecisionName = 
placement.Status.DecisionGroups[0].Decisions[0] + if placementDecisionName != "" { + util.Ctx.Log.Info("got placementdecision name " + placementDecisionName) + + return placement, placementDecisionName, nil + } + } + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return nil, "", fmt.Errorf("could not get placement decision before timeout") + } + + util.Ctx.Log.Info(fmt.Sprintf("could not get placement decision, retry in %v seconds", util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} + +func waitDRPCReady(client client.Client, namespace string, drpcName string) error { + startTime := time.Now() + + for { + drpc, err := getDRPC(client, namespace, drpcName) + if err != nil { + return err + } + + conditionReady := checkDRPCConditions(drpc) + if conditionReady && drpc.Status.LastGroupSyncTime != nil { + util.Ctx.Log.Info("drpc " + drpcName + " is ready") + + return nil + } + + if conditionReady && drpc.Status.LastGroupSyncTime == nil { + util.Ctx.Log.Info("drpc " + drpcName + " LastGroupSyncTime is nil") + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return fmt.Errorf(fmt.Sprintf("drpc "+drpcName+" is not ready yet before timeout of %v", util.Timeout)) + } + + util.Ctx.Log.Info(fmt.Sprintf("drpc "+drpcName+" is not ready yet, retry in %v seconds", util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} + +func checkDRPCConditions(drpc *ramen.DRPlacementControl) bool { + available := false + peerReady := false + + for _, cond := range drpc.Status.Conditions { + if cond.Type == "Available" { + if cond.Status != "True" { + util.Ctx.Log.Info("drpc " + drpc.Name + " condition Available is not True") + + return false + } + + available = true + } + + if cond.Type == "PeerReady" { + if cond.Status != "True" { + util.Ctx.Log.Info("drpc " + drpc.Name + " condition PeerReady is not True") + + return false + } + + peerReady = true + } + } + + return available && peerReady +} + +func waitDRPCPhase(client client.Client, namespace string, name string, phase string) error { + startTime := time.Now() + + for { + drpc, err := getDRPC(client, namespace, name) + if err != nil { + return err + } + + currentPhase := string(drpc.Status.Phase) + if currentPhase == phase { + util.Ctx.Log.Info("drpc " + name + " phase is " + phase) + + return nil + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + // fmt.Printf("drpc "+drpcName+" phase is not %s yet before timeout of %v\n", phase, timeout) + return fmt.Errorf(fmt.Sprintf("drpc "+name+" status is not %s yet before timeout of %v", phase, util.Timeout)) + } + + util.Ctx.Log.Info(fmt.Sprintf("current drpc "+name+ + " phase is %s, expecting %s, retry in %v seconds", currentPhase, phase, util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} + +func getCurrentCluster(client client.Client, namespace string, placementName string) (string, error) { + _, placementDecisionName, err := waitPlacementDecision(client, namespace, placementName) + if err != nil { + return "", err + } + + util.Ctx.Log.Info("get placementdecision " + placementDecisionName) + + placementDecision, err := getPlacementDecision(client, namespace, placementDecisionName) + if err != nil { + return "", err + } + + clusterName := placementDecision.Status.Decisions[0].ClusterName + util.Ctx.Log.Info("placementdecision clusterName: " + clusterName) + + return clusterName, nil +} + +func getTargetCluster(client client.Client, 
namespace, placementName string, drpolicy *ramen.DRPolicy) (string, error) { + currentCluster, err := getCurrentCluster(client, namespace, placementName) + if err != nil { + return "", err + } + + targetCluster := "" + if currentCluster == drpolicy.Spec.DRClusters[0] { + targetCluster = drpolicy.Spec.DRClusters[1] + } else { + targetCluster = drpolicy.Spec.DRClusters[0] + } + + return targetCluster, nil +} + +// first wait DRPC to have +func waitDRPC(client client.Client, namespace, name, expectedPhase string) error { + // sleep to wait for DRPC is processed + time.Sleep(FiveSecondsDuration) + // check Phase + if err := waitDRPCPhase(client, namespace, name, expectedPhase); err != nil { + return err + } + // then check Conditions + return waitDRPCReady(client, namespace, name) +} diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index 6a92cf4aa..381ed2deb 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -30,7 +30,8 @@ func Exhaustive(t *testing.T) { appset := &deployers.ApplicationSet{} appset.Init() - Deployers := []deployers.Deployer{subscription, appset} + // Deployers := []deployers.Deployer{subscription, appset} + Deployers := []deployers.Deployer{subscription} for _, workload := range Workloads { for _, deployer := range Deployers { diff --git a/e2e/go.mod b/e2e/go.mod index 552fc84f9..11729ed85 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -6,6 +6,7 @@ require ( github.com/go-logr/logr v1.4.1 github.com/ramendr/ramen/api v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.27.0 + k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 k8s.io/client-go v12.0.0+incompatible k8s.io/kubectl v0.29.3 @@ -50,7 +51,6 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.3 // indirect k8s.io/component-base v0.29.3 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect diff --git a/e2e/util/const.go b/e2e/util/const.go index d53b1e040..8cfd6abe3 100644 --- a/e2e/util/const.go +++ b/e2e/util/const.go @@ -7,4 +7,6 @@ const ( RamenSystemNamespace = "ramen-system" ChannelName = "ramen-gitops" ChannelNamespace = "ramen-samples" + Timeout = 600 // seconds + TimeInterval = 30 // seconds ) diff --git a/e2e/util/context.go b/e2e/util/context.go index a95bc5a48..a8e449638 100644 --- a/e2e/util/context.go +++ b/e2e/util/context.go @@ -15,9 +15,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" // Placement - ocmclusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + ocmv1b1 "open-cluster-management.io/api/cluster/v1beta1" // ManagedClusterSetBinding - ocmclusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" + ocmv1b2 "open-cluster-management.io/api/cluster/v1beta2" ramen "github.com/ramendr/ramen/api/v1alpha1" subscription "open-cluster-management.io/multicloud-operators-subscription/pkg/apis" @@ -45,11 +45,11 @@ type Context struct { var Ctx *Context func addToScheme(scheme *runtime.Scheme) error { - if err := ocmclusterv1beta1.AddToScheme(scheme); err != nil { + if err := ocmv1b1.AddToScheme(scheme); err != nil { return err } - if err := ocmclusterv1beta2.AddToScheme(scheme); err != nil { + if err := ocmv1b2.AddToScheme(scheme); err != nil { return err } diff --git a/e2e/workloads/workload.go b/e2e/workloads/workload.go index c74f19f64..abdc61b44 100644 --- a/e2e/workloads/workload.go +++ b/e2e/workloads/workload.go @@ -8,11 +8,11 @@ type Workload interface { // GetResources() error // Get 
the actual workload resources GetName() string - // GetAppName() string + GetAppName() string // GetRepoURL() string // Possibly all this is part of Workload than each implementation of the interfaces? - // GetPath() string - // GetRevision() string + GetPath() string + GetRevision() string Init() } From 750c8b75fd7f911eeda985e620015a96aa2564aa Mon Sep 17 00:00:00 2001 From: jacklu Date: Mon, 6 May 2024 18:34:23 +0800 Subject: [PATCH 02/32] update make e2e-rdr script to let rdr-e2e.sh could run directly Signed-off-by: jacklu --- Makefile | 2 +- e2e/dractions/retry.go | 2 +- e2e/rdr-e2e.sh | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 34f3b0111..ff5091c11 100644 --- a/Makefile +++ b/Makefile @@ -189,7 +189,7 @@ test-ramenctl: ## Run ramenctl tests. $(MAKE) -C ramenctl e2e-rdr: generate manifests ## Run rdr-e2e tests. - ./e2e/rdr-e2e.sh + cd e2e && ./rdr-e2e.sh coverage: go tool cover -html=cover.out diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go index 131197ecb..da7d075f4 100644 --- a/e2e/dractions/retry.go +++ b/e2e/dractions/retry.go @@ -162,7 +162,7 @@ func getTargetCluster(client client.Client, namespace, placementName string, drp return targetCluster, nil } -// first wait DRPC to have +// first wait DRPC to have the expected phase, then check DRPC conditions func waitDRPC(client client.Client, namespace, name, expectedPhase string) error { // sleep to wait for DRPC is processed time.Sleep(FiveSecondsDuration) diff --git a/e2e/rdr-e2e.sh b/e2e/rdr-e2e.sh index e9f6c0ef6..482508895 100755 --- a/e2e/rdr-e2e.sh +++ b/e2e/rdr-e2e.sh @@ -5,7 +5,6 @@ echo "Running tests..." -cd ./e2e/ || exit 1 go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -v exit 0 \ No newline at end of file From 9ed3a8c755f4dadc45a88c6935fb284be3f4dd2a Mon Sep 17 00:00:00 2001 From: jacklu Date: Mon, 6 May 2024 18:50:35 +0800 Subject: [PATCH 03/32] rename rdr-e2e.sh to e2e-rdr.sh Signed-off-by: jacklu --- Makefile | 2 +- e2e/{rdr-e2e.sh => e2e-rdr.sh} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename e2e/{rdr-e2e.sh => e2e-rdr.sh} (100%) diff --git a/Makefile b/Makefile index ff5091c11..63499b212 100644 --- a/Makefile +++ b/Makefile @@ -189,7 +189,7 @@ test-ramenctl: ## Run ramenctl tests. $(MAKE) -C ramenctl e2e-rdr: generate manifests ## Run rdr-e2e tests. 
- cd e2e && ./rdr-e2e.sh + cd e2e && ./e2e-rdr.sh coverage: go tool cover -html=cover.out diff --git a/e2e/rdr-e2e.sh b/e2e/e2e-rdr.sh similarity index 100% rename from e2e/rdr-e2e.sh rename to e2e/e2e-rdr.sh From 11b20d1724b8e4827a7c61315717f104d73eaf64 Mon Sep 17 00:00:00 2001 From: jacklu Date: Tue, 7 May 2024 15:23:21 +0800 Subject: [PATCH 04/32] create channel and wait for subscription is ready Signed-off-by: jacklu --- e2e/deployers/crud.go | 65 ++++++++++++++++++++++++++++++++++- e2e/deployers/retry.go | 42 ++++++++++++++++++++++ e2e/deployers/subscription.go | 20 +++++++++-- e2e/dractions/retry.go | 6 ++-- e2e/e2e-rdr.sh | 4 +-- e2e/go.mod | 2 +- e2e/util/const.go | 33 +++++++++++++++--- 7 files changed, 159 insertions(+), 13 deletions(-) create mode 100644 e2e/deployers/retry.go diff --git a/e2e/deployers/crud.go b/e2e/deployers/crud.go index e347a2ad1..8491081db 100644 --- a/e2e/deployers/crud.go +++ b/e2e/deployers/crud.go @@ -10,11 +10,14 @@ import ( "github.com/ramendr/ramen/e2e/util" "github.com/ramendr/ramen/e2e/workloads" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ocmv1b1 "open-cluster-management.io/api/cluster/v1beta1" ocmv1b2 "open-cluster-management.io/api/cluster/v1beta2" + channelv1 "open-cluster-management.io/multicloud-operators-channel/pkg/apis/apps/v1" placementrulev1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" ) @@ -185,7 +188,7 @@ func createSubscription(s Subscription, w workloads.Workload) error { Annotations: annotations, }, Spec: subscriptionv1.SubscriptionSpec{ - Channel: util.ChannelNamespace + "/" + util.ChannelName, + Channel: util.GetChannelNamespace() + "/" + util.GetChannelName(), Placement: placementRulePlacement, }, } @@ -228,3 +231,63 @@ func deleteSubscription(s Subscription, w workloads.Workload) error { func GetCombinedName(d Deployer, w workloads.Workload) string { return strings.ToLower(d.GetName() + "-" + w.GetAppName()) } + +func createChannel() error { + objChannel := &channelv1.Channel{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.GetChannelName(), + Namespace: util.GetChannelNamespace(), + }, + Spec: channelv1.ChannelSpec{ + Pathname: util.GetChannelPathname(), + Type: channelv1.ChannelTypeGitHub, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), objChannel) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("channel " + util.GetChannelName() + " already Exists") + } + + return nil +} + +// should not delete channel when undeploy, since other tests may also use this channel +// comment out this func to avoid golint issue: unused function +/* +func deleteChannel() error { + channel := &channelv1.Channel{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.GetChannelName(), + Namespace: util.GetChannelNamespace(), + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), channel) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("channel " + util.GetChannelName() + " not found") + } + + return nil +} +*/ + +func getSubscription(ctrlClient client.Client, namespace, name string) (*subscriptionv1.Subscription, error) { + subscription := &subscriptionv1.Subscription{} + key := types.NamespacedName{Name: name, Namespace: namespace} + + err := 
ctrlClient.Get(context.Background(), key, subscription) + if err != nil { + return nil, err + } + + return subscription, nil +} diff --git a/e2e/deployers/retry.go b/e2e/deployers/retry.go new file mode 100644 index 000000000..6f641f5b5 --- /dev/null +++ b/e2e/deployers/retry.go @@ -0,0 +1,42 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package deployers + +import ( + "fmt" + "time" + + "github.com/ramendr/ramen/e2e/util" +) + +const FiveSecondsDuration = 5 * time.Second + +func waitSubscriptionPhase(namespace, name, phase string) error { + // sleep to wait for subscription is processed + time.Sleep(FiveSecondsDuration) + + startTime := time.Now() + + for { + sub, err := getSubscription(util.Ctx.Hub.CtrlClient, namespace, name) + if err != nil { + return err + } + + currentPhase := string(sub.Status.Phase) + if currentPhase == phase { + util.Ctx.Log.Info("subscription " + name + " phase is " + phase) + + return nil + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return fmt.Errorf(fmt.Sprintf("subscription "+name+" status is not %s yet before timeout of %v", phase, util.Timeout)) + } + + util.Ctx.Log.Info(fmt.Sprintf("current subscription "+name+ + " phase is %s, expecting %s, retry in %v seconds", currentPhase, phase, util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} diff --git a/e2e/deployers/subscription.go b/e2e/deployers/subscription.go index 682386955..59b50e550 100644 --- a/e2e/deployers/subscription.go +++ b/e2e/deployers/subscription.go @@ -32,8 +32,19 @@ func (s Subscription) Deploy(w workloads.Workload) error { name := GetCombinedName(s, w) namespace := name - // w.Kustomize() - err := createNamespace(namespace) + // create channel namespace + err := createNamespace(util.GetChannelNamespace()) + if err != nil { + return err + } + + err = createChannel() + if err != nil { + return err + } + + // create subscription namespace + err = createNamespace(namespace) if err != nil { return err } @@ -53,6 +64,11 @@ func (s Subscription) Deploy(w workloads.Workload) error { return err } + err = waitSubscriptionPhase(namespace, name, "Propagated") + if err != nil { + return err + } + return nil } diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go index da7d075f4..0206b4711 100644 --- a/e2e/dractions/retry.go +++ b/e2e/dractions/retry.go @@ -111,17 +111,17 @@ func waitDRPCPhase(client client.Client, namespace string, name string, phase st currentPhase := string(drpc.Status.Phase) if currentPhase == phase { - util.Ctx.Log.Info("drpc " + name + " phase is " + phase) + util.Ctx.Log.Info("drpc " + name + " phase is " + phase) return nil } if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { // fmt.Printf("drpc "+drpcName+" phase is not %s yet before timeout of %v\n", phase, timeout) - return fmt.Errorf(fmt.Sprintf("drpc "+name+" status is not %s yet before timeout of %v", phase, util.Timeout)) + return fmt.Errorf(fmt.Sprintf("drpc "+name+" status is not %s yet before timeout of %v", phase, util.Timeout)) } - util.Ctx.Log.Info(fmt.Sprintf("current drpc "+name+ + util.Ctx.Log.Info(fmt.Sprintf("current drpc "+name+ " phase is %s, expecting %s, retry in %v seconds", currentPhase, phase, util.TimeInterval)) time.Sleep(time.Second * time.Duration(util.TimeInterval)) } diff --git a/e2e/e2e-rdr.sh b/e2e/e2e-rdr.sh index 482508895..f1f69e9a0 100755 --- a/e2e/e2e-rdr.sh +++ b/e2e/e2e-rdr.sh @@ -5,6 +5,6 @@ echo "Running tests..." 
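Aside: the -kubeconfig-* options in the go test invocation below are custom flags of the e2e test binary, so the suite has to register them before flags are parsed. Their definitions are not part of this patch (they presumably live in the suite's main_test.go); a minimal sketch of how such registration typically looks, with illustrative variable names:

	package e2e_test

	import "flag"

	// Illustrative only: the real flag definitions live elsewhere in the e2e module.
	var (
		kubeconfigHub = flag.String("kubeconfig-hub", "", "path to the hub cluster kubeconfig")
		kubeconfigC1  = flag.String("kubeconfig-c1", "", "path to the first managed cluster kubeconfig")
		kubeconfigC2  = flag.String("kubeconfig-c2", "", "path to the second managed cluster kubeconfig")
	)

The "$@" added in this hunk forwards any extra arguments to go test, so a caller can narrow the run with, for example, -run TestSuites.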
-go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -v +go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -v "$@" -exit 0 \ No newline at end of file +exit 0 diff --git a/e2e/go.mod b/e2e/go.mod index 11729ed85..2899bd32a 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -11,6 +11,7 @@ require ( k8s.io/client-go v12.0.0+incompatible k8s.io/kubectl v0.29.3 open-cluster-management.io/api v0.13.0 + open-cluster-management.io/multicloud-operators-channel v0.10.1-0.20230316173315-10f48e51f3aa open-cluster-management.io/multicloud-operators-subscription v0.13.0 sigs.k8s.io/controller-runtime v0.17.3 ) @@ -55,7 +56,6 @@ require ( k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - open-cluster-management.io/multicloud-operators-channel v0.10.1-0.20230316173315-10f48e51f3aa // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/e2e/util/const.go b/e2e/util/const.go index 8cfd6abe3..d90d6db30 100644 --- a/e2e/util/const.go +++ b/e2e/util/const.go @@ -3,10 +3,35 @@ package util +import "os" + const ( RamenSystemNamespace = "ramen-system" - ChannelName = "ramen-gitops" - ChannelNamespace = "ramen-samples" - Timeout = 600 // seconds - TimeInterval = 30 // seconds + + Timeout = 600 // seconds + TimeInterval = 30 // seconds + + defaultChannelName = "ramen-gitops" + defaultChannelNamespace = "ramen-samples" + defaultChannelPathname = "https://github.com/RamenDR/ocm-ramen-samples.git" ) + +func GetChannelName() string { + return getEnv("ChannelName", defaultChannelName) +} + +func GetChannelNamespace() string { + return getEnv("ChannelNamespace", defaultChannelNamespace) +} + +func GetChannelPathname() string { + return getEnv("ChannelPathname", defaultChannelPathname) +} + +func getEnv(name, defaultValue string) string { + if value := os.Getenv(name); value != "" { + return value + } + + return defaultValue +} From 27273e9bdeca6ae45f2bcd6a7d3397c5beb93b90 Mon Sep 17 00:00:00 2001 From: jacklu Date: Tue, 7 May 2024 15:35:19 +0800 Subject: [PATCH 05/32] adopt the latest e2e.yaml change for e2e workflow Signed-off-by: jacklu --- e2e/deployers/retry.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/e2e/deployers/retry.go b/e2e/deployers/retry.go index 6f641f5b5..d1a8b03d6 100644 --- a/e2e/deployers/retry.go +++ b/e2e/deployers/retry.go @@ -32,7 +32,12 @@ func waitSubscriptionPhase(namespace, name, phase string) error { } if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { - return fmt.Errorf(fmt.Sprintf("subscription "+name+" status is not %s yet before timeout of %v", phase, util.Timeout)) + return fmt.Errorf(fmt.Sprintf("subscription "+name+" status is not %s yet before timeout of %v", + phase, util.Timeout)) + } + + if currentPhase == "" { + currentPhase = "empty" } util.Ctx.Log.Info(fmt.Sprintf("current subscription "+name+ From 722442b0e0d817fc8427bc44b3c2d36c5d77e547 Mon Sep 17 00:00:00 2001 From: jacklu Date: Mon, 13 May 2024 13:36:46 +0800 Subject: [PATCH 06/32] refactor code based on review comments Signed-off-by: jacklu --- e2e/config.yaml | 3 + 
e2e/deployers/applicationset.go | 4 +- e2e/deployers/crud.go | 91 +------------------------ e2e/deployers/deployer.go | 1 - e2e/deployers/retry.go | 15 ++-- e2e/deployers/subscription.go | 23 ++----- e2e/dractions/actions.go | 18 ----- e2e/dractions/crud.go | 3 + e2e/dractions/retry.go | 16 ++--- e2e/e2e-rdr.sh | 2 +- e2e/exhaustive_suite_test.go | 26 +++---- e2e/go.mod | 2 +- e2e/main_test.go | 15 +++- e2e/util/const.go | 25 +++++-- e2e/util/crud.go | 117 ++++++++++++++++++++++++++++++++ e2e/util/validation.go | 12 ++++ e2e/util/yaml.go | 61 +++++++++++++++++ e2e/validation_suite_test.go | 46 ++++++++++++- e2e/workloads/deployment.go | 18 ++--- e2e/workloads/workload.go | 2 - 20 files changed, 322 insertions(+), 178 deletions(-) create mode 100644 e2e/config.yaml create mode 100644 e2e/util/crud.go create mode 100644 e2e/util/yaml.go diff --git a/e2e/config.yaml b/e2e/config.yaml new file mode 100644 index 000000000..c6b78cfe0 --- /dev/null +++ b/e2e/config.yaml @@ -0,0 +1,3 @@ +channelname: "ramen-gitops" +channelnamespace: "ramen-samples" +giturl: "https://github.com/RamenDR/ocm-ramen-samples.git" diff --git a/e2e/deployers/applicationset.go b/e2e/deployers/applicationset.go index daa205c2e..9abd3d29a 100644 --- a/e2e/deployers/applicationset.go +++ b/e2e/deployers/applicationset.go @@ -10,8 +10,8 @@ import ( type ApplicationSet struct{} -func (a *ApplicationSet) Init() { -} +// func (a *ApplicationSet) Init() { +// } func (a ApplicationSet) Deploy(w workloads.Workload) error { util.Ctx.Log.Info("enter Deploy " + w.GetName() + "/Appset") diff --git a/e2e/deployers/crud.go b/e2e/deployers/crud.go index 8491081db..0e66212a4 100644 --- a/e2e/deployers/crud.go +++ b/e2e/deployers/crud.go @@ -17,7 +17,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ocmv1b1 "open-cluster-management.io/api/cluster/v1beta1" ocmv1b2 "open-cluster-management.io/api/cluster/v1beta2" - channelv1 "open-cluster-management.io/multicloud-operators-channel/pkg/apis/apps/v1" placementrulev1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" ) @@ -27,44 +26,6 @@ const ( ClusterSetName = "default" ) -func createNamespace(namespace string) error { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - } - - err := util.Ctx.Hub.CtrlClient.Create(context.Background(), ns) - if err != nil { - if !errors.IsAlreadyExists(err) { - return err - } - - util.Ctx.Log.Info("namespace " + namespace + " already Exists") - } - - return nil -} - -func deleteNamespace(namespace string) error { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - } - - err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), ns) - if err != nil { - if !errors.IsNotFound(err) { - return err - } - - util.Ctx.Log.Info("namespace " + namespace + " not found") - } - - return nil -} - func createManagedClusterSetBinding(name, namespace string) error { labels := make(map[string]string) labels[AppLabelKey] = namespace @@ -84,8 +45,6 @@ func createManagedClusterSetBinding(name, namespace string) error { if !errors.IsAlreadyExists(err) { return err } - - util.Ctx.Log.Info("managedClusterSetBinding " + mcsb.Name + " already Exists") } return nil @@ -229,56 +188,8 @@ func deleteSubscription(s Subscription, w workloads.Workload) error { } func GetCombinedName(d Deployer, w workloads.Workload) string { - return strings.ToLower(d.GetName() + "-" + 
w.GetAppName()) -} - -func createChannel() error { - objChannel := &channelv1.Channel{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.GetChannelName(), - Namespace: util.GetChannelNamespace(), - }, - Spec: channelv1.ChannelSpec{ - Pathname: util.GetChannelPathname(), - Type: channelv1.ChannelTypeGitHub, - }, - } - - err := util.Ctx.Hub.CtrlClient.Create(context.Background(), objChannel) - if err != nil { - if !errors.IsAlreadyExists(err) { - return err - } - - util.Ctx.Log.Info("channel " + util.GetChannelName() + " already Exists") - } - - return nil -} - -// should not delete channel when undeploy, since other tests may also use this channel -// comment out this func to avoid golint issue: unused function -/* -func deleteChannel() error { - channel := &channelv1.Channel{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.GetChannelName(), - Namespace: util.GetChannelNamespace(), - }, - } - - err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), channel) - if err != nil { - if !errors.IsNotFound(err) { - return err - } - - util.Ctx.Log.Info("channel " + util.GetChannelName() + " not found") - } - - return nil + return strings.ToLower(d.GetName() + "-" + w.GetName() + "-" + w.GetAppName()) } -*/ func getSubscription(ctrlClient client.Client, namespace, name string) (*subscriptionv1.Subscription, error) { subscription := &subscriptionv1.Subscription{} diff --git a/e2e/deployers/deployer.go b/e2e/deployers/deployer.go index 0ad4181b6..5b246ebd7 100644 --- a/e2e/deployers/deployer.go +++ b/e2e/deployers/deployer.go @@ -7,7 +7,6 @@ import "github.com/ramendr/ramen/e2e/workloads" // Deployer interface has methods to deploy a workload to a cluster type Deployer interface { - Init() Deploy(workloads.Workload) error Undeploy(workloads.Workload) error // Scale(Workload) for adding/removing PVCs; in Deployer even though scaling is a Workload interface diff --git a/e2e/deployers/retry.go b/e2e/deployers/retry.go index d1a8b03d6..8999db124 100644 --- a/e2e/deployers/retry.go +++ b/e2e/deployers/retry.go @@ -8,11 +8,12 @@ import ( "time" "github.com/ramendr/ramen/e2e/util" + subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" ) const FiveSecondsDuration = 5 * time.Second -func waitSubscriptionPhase(namespace, name, phase string) error { +func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.SubscriptionPhase) error { // sleep to wait for subscription is processed time.Sleep(FiveSecondsDuration) @@ -24,24 +25,24 @@ func waitSubscriptionPhase(namespace, name, phase string) error { return err } - currentPhase := string(sub.Status.Phase) + currentPhase := sub.Status.Phase if currentPhase == phase { - util.Ctx.Log.Info("subscription " + name + " phase is " + phase) + util.Ctx.Log.Info(fmt.Sprintf("subscription %s phase is %s", name, phase)) return nil } if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { - return fmt.Errorf(fmt.Sprintf("subscription "+name+" status is not %s yet before timeout of %v", - phase, util.Timeout)) + return fmt.Errorf(fmt.Sprintf("subscription %s status is not %s yet before timeout of %v", + name, phase, util.Timeout)) } if currentPhase == "" { currentPhase = "empty" } - util.Ctx.Log.Info(fmt.Sprintf("current subscription "+name+ - " phase is %s, expecting %s, retry in %v seconds", currentPhase, phase, util.TimeInterval)) + util.Ctx.Log.Info(fmt.Sprintf("current subscription %s phase is %s, expecting %s, retry in %v seconds", + name, currentPhase, phase, util.TimeInterval)) time.Sleep(time.Second * 
time.Duration(util.TimeInterval)) } } diff --git a/e2e/deployers/subscription.go b/e2e/deployers/subscription.go index 59b50e550..916dfcdc9 100644 --- a/e2e/deployers/subscription.go +++ b/e2e/deployers/subscription.go @@ -6,6 +6,7 @@ package deployers import ( "github.com/ramendr/ramen/e2e/util" "github.com/ramendr/ramen/e2e/workloads" + subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" ) // mcsb name must be same as the target ManagedClusterSet @@ -13,9 +14,6 @@ const McsbName = ClusterSetName type Subscription struct{} -func (s Subscription) Init() { -} - func (s Subscription) GetName() string { return "Subscription" } @@ -32,19 +30,8 @@ func (s Subscription) Deploy(w workloads.Workload) error { name := GetCombinedName(s, w) namespace := name - // create channel namespace - err := createNamespace(util.GetChannelNamespace()) - if err != nil { - return err - } - - err = createChannel() - if err != nil { - return err - } - // create subscription namespace - err = createNamespace(namespace) + err := util.CreateNamespace(util.Ctx.Hub.CtrlClient, namespace) if err != nil { return err } @@ -64,7 +51,7 @@ func (s Subscription) Deploy(w workloads.Workload) error { return err } - err = waitSubscriptionPhase(namespace, name, "Propagated") + err = waitSubscriptionPhase(namespace, name, subscriptionv1.SubscriptionPropagated) if err != nil { return err } @@ -74,7 +61,7 @@ func (s Subscription) Deploy(w workloads.Workload) error { func (s Subscription) Undeploy(w workloads.Workload) error { // Delete Subscription, Placement, Binding - util.Ctx.Log.Info("enter Undeploy " + w.GetName() + "/Subscription") + util.Ctx.Log.Info("enter Undeploy " + w.GetName() + s.GetName()) name := GetCombinedName(s, w) namespace := name @@ -94,7 +81,7 @@ func (s Subscription) Undeploy(w workloads.Workload) error { return err } - err = deleteNamespace(namespace) + err = util.DeleteNamespace(util.Ctx.Hub.CtrlClient, namespace) if err != nil { return err } diff --git a/e2e/dractions/actions.go b/e2e/dractions/actions.go index ea06b8a7b..dc0ad7a77 100644 --- a/e2e/dractions/actions.go +++ b/e2e/dractions/actions.go @@ -4,7 +4,6 @@ package dractions import ( - "fmt" "time" "github.com/ramendr/ramen/e2e/deployers" @@ -27,23 +26,13 @@ const ( func EnableProtection(w workloads.Workload, d deployers.Deployer) error { util.Ctx.Log.Info("enter EnableProtection " + w.GetName() + "/" + d.GetName()) - _, isSub := d.(*deployers.Subscription) - _, isAppSet := d.(*deployers.ApplicationSet) - - if !isSub && !isAppSet { - return fmt.Errorf("deployer unknown") - } - name := GetCombinedName(d, w) namespace := name - // if isAppSet { // namespace = util.ArgocdNamespace // } - drPolicyName := DefaultDRPolicyName appname := w.GetAppName() - placementName := name drpcName := name @@ -92,13 +81,6 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error { func DisableProtection(w workloads.Workload, d deployers.Deployer) error { util.Ctx.Log.Info("enter DRActions DisableProtection") - _, isSub := d.(*deployers.Subscription) - _, isAppSet := d.(*deployers.ApplicationSet) - - if !isSub && !isAppSet { - return fmt.Errorf("deployers.Deployer not known") - } - name := GetCombinedName(d, w) namespace := name placementName := name diff --git a/e2e/dractions/crud.go b/e2e/dractions/crud.go index f6ff30abe..5b289a36e 100644 --- a/e2e/dractions/crud.go +++ b/e2e/dractions/crud.go @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + package 
dractions import ( diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go index 0206b4711..a9941b6e7 100644 --- a/e2e/dractions/retry.go +++ b/e2e/dractions/retry.go @@ -1,3 +1,6 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + package dractions import ( @@ -63,10 +66,10 @@ func waitDRPCReady(client client.Client, namespace string, drpcName string) erro } if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { - return fmt.Errorf(fmt.Sprintf("drpc "+drpcName+" is not ready yet before timeout of %v", util.Timeout)) + return fmt.Errorf(fmt.Sprintf("drpc %s is not ready yet before timeout of %v", drpcName, util.Timeout)) } - util.Ctx.Log.Info(fmt.Sprintf("drpc "+drpcName+" is not ready yet, retry in %v seconds", util.TimeInterval)) + util.Ctx.Log.Info(fmt.Sprintf("drpc %s is not ready yet, retry in %v seconds", drpcName, util.TimeInterval)) time.Sleep(time.Second * time.Duration(util.TimeInterval)) } } @@ -117,12 +120,11 @@ func waitDRPCPhase(client client.Client, namespace string, name string, phase st } if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { - // fmt.Printf("drpc "+drpcName+" phase is not %s yet before timeout of %v\n", phase, timeout) - return fmt.Errorf(fmt.Sprintf("drpc "+name+" status is not %s yet before timeout of %v", phase, util.Timeout)) + return fmt.Errorf(fmt.Sprintf("drpc %s status is not %s yet before timeout of %v", name, phase, util.Timeout)) } - util.Ctx.Log.Info(fmt.Sprintf("current drpc "+name+ - " phase is %s, expecting %s, retry in %v seconds", currentPhase, phase, util.TimeInterval)) + util.Ctx.Log.Info(fmt.Sprintf("current drpc %s phase is %s, expecting %s, retry in %v seconds", + name, currentPhase, phase, util.TimeInterval)) time.Sleep(time.Second * time.Duration(util.TimeInterval)) } } @@ -133,8 +135,6 @@ func getCurrentCluster(client client.Client, namespace string, placementName str return "", err } - util.Ctx.Log.Info("get placementdecision " + placementDecisionName) - placementDecision, err := getPlacementDecision(client, namespace, placementDecisionName) if err != nil { return "", err diff --git a/e2e/e2e-rdr.sh b/e2e/e2e-rdr.sh index f1f69e9a0..7d2320204 100755 --- a/e2e/e2e-rdr.sh +++ b/e2e/e2e-rdr.sh @@ -5,6 +5,6 @@ echo "Running tests..." 
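The -timeout 0 added in the hunk below disables go test's default 10-minute overall timeout. The waits in this suite poll for up to util.Timeout (600 seconds) each, so a full deploy/protect/failover/relocate pass can legitimately run longer than 10 minutes.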
-go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -v "$@" +go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -timeout 0 -v "$@" exit 0 diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index 381ed2deb..8c2763d9a 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -15,23 +15,25 @@ import ( // Workloads = {"Deployment", "STS", "DaemonSet"} // Classes = {"rbd", "cephfs"} -func Exhaustive(t *testing.T) { - t.Helper() - t.Parallel() +var deployment = &workloads.Deployment{ + Path: "workloads/deployment/k8s-regional-rbd", + Revision: "main", + AppName: "busybox", + Name: "Deployment", +} - deployment := &workloads.Deployment{} - deployment.Init() +var Workloads = []workloads.Workload{deployment} - Workloads := []workloads.Workload{deployment} +var subscription = &deployers.Subscription{} - subscription := &deployers.Subscription{} - subscription.Init() +// appset := &deployers.ApplicationSet{} +// Deployers := []deployers.Deployer{subscription, appset} - appset := &deployers.ApplicationSet{} - appset.Init() +var Deployers = []deployers.Deployer{subscription} - // Deployers := []deployers.Deployer{subscription, appset} - Deployers := []deployers.Deployer{subscription} +func Exhaustive(t *testing.T) { + t.Helper() + t.Parallel() for _, workload := range Workloads { for _, deployer := range Deployers { diff --git a/e2e/go.mod b/e2e/go.mod index 2899bd32a..b576d328d 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -6,6 +6,7 @@ require ( github.com/go-logr/logr v1.4.1 github.com/ramendr/ramen/api v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.27.0 + gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 k8s.io/client-go v12.0.0+incompatible @@ -50,7 +51,6 @@ require ( google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.29.3 // indirect k8s.io/klog/v2 v2.110.1 // indirect diff --git a/e2e/main_test.go b/e2e/main_test.go index 41296a892..4cb2263cc 100644 --- a/e2e/main_test.go +++ b/e2e/main_test.go @@ -49,14 +49,27 @@ type testDef struct { } var Suites = []testDef{ - {"Validate", Validate}, {"Exhaustive", Exhaustive}, } func TestSuites(t *testing.T) { util.Ctx.Log.Info(t.Name()) + if err := util.EnsureChannel(); err != nil { + t.Fatalf("failed to ensure channel: %v", err) + } + + if !t.Run("Validate", Validate) { + t.Fatal("failed to validate the test suite") + } + for _, suite := range Suites { t.Run(suite.name, suite.test) } + + t.Cleanup(func() { + if err := util.EnsureChannelDeleted(); err != nil { + t.Fatalf("failed to ensure channel deleted: %v", err) + } + }) } diff --git a/e2e/util/const.go b/e2e/util/const.go index d90d6db30..4ff22642a 100644 --- a/e2e/util/const.go +++ b/e2e/util/const.go @@ -3,7 +3,9 @@ package util -import "os" +import ( + "os" +) const ( RamenSystemNamespace = "ramen-system" @@ -13,19 +15,34 @@ const ( defaultChannelName = "ramen-gitops" defaultChannelNamespace = "ramen-samples" - defaultChannelPathname = "https://github.com/RamenDR/ocm-ramen-samples.git" + defaultGitURL = "https://github.com/RamenDR/ocm-ramen-samples.git" ) func GetChannelName() string { + 
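+	// Lookup order after this change: config.yaml first, then the
+	// ChannelName environment variable, then the compiled-in default.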
channelName, err := GetChannelNameFromYaml() + if err == nil && channelName != "" { + return channelName + } + return getEnv("ChannelName", defaultChannelName) } func GetChannelNamespace() string { + channelNamespace, err := GetChannelNamespaceFromYaml() + if err == nil && channelNamespace != "" { + return channelNamespace + } + return getEnv("ChannelNamespace", defaultChannelNamespace) } -func GetChannelPathname() string { - return getEnv("ChannelPathname", defaultChannelPathname) +func GetGitURL() string { + gitURL, err := GetGitURLFromYaml() + if err == nil && gitURL != "" { + return gitURL + } + + return getEnv("GitURL", defaultGitURL) } func getEnv(name, defaultValue string) string { diff --git a/e2e/util/crud.go b/e2e/util/crud.go new file mode 100644 index 000000000..650bce856 --- /dev/null +++ b/e2e/util/crud.go @@ -0,0 +1,117 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + channelv1 "open-cluster-management.io/multicloud-operators-channel/pkg/apis/apps/v1" +) + +func CreateNamespace(client client.Client, namespace string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + err := client.Create(context.Background(), ns) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + } + + return nil +} + +func DeleteNamespace(client client.Client, namespace string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + err := client.Delete(context.Background(), ns) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + Ctx.Log.Info("namespace " + namespace + " not found") + } + + return nil +} + +func createChannel() error { + objChannel := &channelv1.Channel{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetChannelName(), + Namespace: GetChannelNamespace(), + }, + Spec: channelv1.ChannelSpec{ + Pathname: GetGitURL(), + Type: channelv1.ChannelTypeGitHub, + }, + } + + err := Ctx.Hub.CtrlClient.Create(context.Background(), objChannel) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + Ctx.Log.Info("channel " + GetChannelName() + " already exists") + } else { + Ctx.Log.Info("channel " + GetChannelName() + " is created") + } + + return nil +} + +func deleteChannel() error { + channel := &channelv1.Channel{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetChannelName(), + Namespace: GetChannelNamespace(), + }, + } + + err := Ctx.Hub.CtrlClient.Delete(context.Background(), channel) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + Ctx.Log.Info("channel " + GetChannelName() + " not found") + } else { + Ctx.Log.Info("channel " + GetChannelName() + " is deleted") + } + + return nil +} + +func EnsureChannel() error { + // create channel namespace + err := CreateNamespace(Ctx.Hub.CtrlClient, GetChannelNamespace()) + if err != nil { + return err + } + + return createChannel() +} + +func EnsureChannelDeleted() error { + if err := deleteChannel(); err != nil { + return err + } + + return DeleteNamespace(Ctx.Hub.CtrlClient, GetChannelNamespace()) +} diff --git a/e2e/util/validation.go b/e2e/util/validation.go index 11b4f5e33..3c9be576b 100644 --- a/e2e/util/validation.go +++ b/e2e/util/validation.go @@ -24,6 +24,18 @@ func CheckRamenHubPodRunningStatus(k8sClient *kubernetes.Clientset) (bool, strin 
 	return CheckPodRunningStatus(k8sClient, ramenNameSpace, labelSelector, podIdentifier)
 }
 
+func CheckRamenSpokePodRunningStatus(k8sClient *kubernetes.Clientset) (bool, string, error) {
+	labelSelector := "app=ramen-dr-cluster"
+	podIdentifier := "ramen-dr-cluster-operator"
+
+	ramenNameSpace, err := GetRamenNameSpace(k8sClient)
+	if err != nil {
+		return false, "", err
+	}
+
+	return CheckPodRunningStatus(k8sClient, ramenNameSpace, labelSelector, podIdentifier)
+}
+
 func GetRamenNameSpace(k8sClient *kubernetes.Clientset) (string, error) {
 	isOpenShift, err := IsOpenShiftCluster(k8sClient)
 	if err != nil {
diff --git a/e2e/util/yaml.go b/e2e/util/yaml.go
new file mode 100644
index 000000000..21de96cd6
--- /dev/null
+++ b/e2e/util/yaml.go
@@ -0,0 +1,61 @@
+// SPDX-FileCopyrightText: The RamenDR authors
+// SPDX-License-Identifier: Apache-2.0
+
+package util
+
+import (
+	"os"
+
+	"gopkg.in/yaml.v2"
+)
+
+type yamlConf struct {
+	ChannelName      string `yaml:"channelname,omitempty"`
+	ChannelNamespace string `yaml:"channelnamespace,omitempty"`
+	GitURL           string `yaml:"giturl,omitempty"`
+}
+
+const configFile = "config.yaml"
+
+func getConf() (yamlConf, error) {
+	cnf := &yamlConf{}
+
+	data, err := os.ReadFile(configFile)
+	if err != nil {
+		return *cnf, err
+	}
+
+	err = yaml.Unmarshal(data, cnf)
+	if err != nil {
+		return *cnf, err
+	}
+
+	return *cnf, nil
+}
+
+func GetChannelNameFromYaml() (string, error) {
+	cnf, err := getConf()
+	if err != nil {
+		return "", err
+	}
+
+	return cnf.ChannelName, nil
+}
+
+func GetChannelNamespaceFromYaml() (string, error) {
+	cnf, err := getConf()
+	if err != nil {
+		return "", err
+	}
+
+	return cnf.ChannelNamespace, nil
+}
+
+func GetGitURLFromYaml() (string, error) {
+	cnf, err := getConf()
+	if err != nil {
+		return "", err
+	}
+
+	return cnf.GitURL, nil
+}
diff --git a/e2e/validation_suite_test.go b/e2e/validation_suite_test.go
index 94bad7adc..4ca531e4e 100644
--- a/e2e/validation_suite_test.go
+++ b/e2e/validation_suite_test.go
@@ -5,15 +5,59 @@ package e2e_test
 
 import (
 	"testing"
+
+	"github.com/ramendr/ramen/e2e/util"
 )
 
 func Validate(t *testing.T) {
 	t.Helper()
 
 	if !t.Run("CheckRamenHubOperatorStatus", CheckRamenHubOperatorStatus) {
-		t.Error()
+		t.Error("CheckRamenHubOperatorStatus failed")
+	}
+
+	if !t.Run("CheckRamenSpokeOperatorStatus", CheckRamenSpokeOperatorStatus) {
+		t.Error("CheckRamenSpokeOperatorStatus failed")
 	}
 }
 
 func CheckRamenHubOperatorStatus(t *testing.T) {
+	util.Ctx.Log.Info("enter CheckRamenHubOperatorStatus")
+
+	isRunning, podName, err := util.CheckRamenHubPodRunningStatus(util.Ctx.Hub.K8sClientSet)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if isRunning {
+		util.Ctx.Log.Info("Ramen Hub Operator is running", "pod", podName)
+	} else {
+		t.Error("no running Ramen Hub Operator pod found")
+	}
+}
+
+func CheckRamenSpokeOperatorStatus(t *testing.T) {
+	util.Ctx.Log.Info("enter CheckRamenSpokeOperatorStatus")
+
+	isRunning, podName, err := util.CheckRamenSpokePodRunningStatus(util.Ctx.C1.K8sClientSet)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if isRunning {
+		util.Ctx.Log.Info("Ramen Spoke Operator is running on cluster 1", "pod", podName)
+	} else {
+		t.Error("no running Ramen Spoke Operator pod on cluster 1")
+	}
+
+	isRunning, podName, err = util.CheckRamenSpokePodRunningStatus(util.Ctx.C2.K8sClientSet)
+	if err != nil {
+		t.Error(err)
+	}
+
+	if isRunning {
+		util.Ctx.Log.Info("Ramen Spoke Operator is running on cluster 2", "pod", podName)
+	} else {
+		t.Error("no running Ramen Spoke Operator pod on cluster 2")
+	}
 }
diff --git
a/e2e/workloads/deployment.go b/e2e/workloads/deployment.go index 92706a0ef..6f1a81238 100644 --- a/e2e/workloads/deployment.go +++ b/e2e/workloads/deployment.go @@ -4,17 +4,11 @@ package workloads type Deployment struct { - RepoURL string + // RepoURL string Path string Revision string AppName string -} - -func (w *Deployment) Init() { - w.RepoURL = "https://github.com/ramendr/ocm-ramen-samples.git" - w.Path = "workloads/deployment/k8s-regional-rbd" - w.Revision = "main" - w.AppName = "busybox" + Name string } func (w Deployment) GetAppName() string { @@ -22,12 +16,12 @@ func (w Deployment) GetAppName() string { } func (w Deployment) GetName() string { - return "Deployment" + return w.Name } -func (w Deployment) GetRepoURL() string { - return w.RepoURL -} +// func (w Deployment) GetRepoURL() string { +// return w.RepoURL +// } func (w Deployment) GetPath() string { return w.Path diff --git a/e2e/workloads/workload.go b/e2e/workloads/workload.go index abdc61b44..5a4f8dcc2 100644 --- a/e2e/workloads/workload.go +++ b/e2e/workloads/workload.go @@ -13,6 +13,4 @@ type Workload interface { // GetRepoURL() string // Possibly all this is part of Workload than each implementation of the interfaces? GetPath() string GetRevision() string - - Init() } From 97c6e61ee0abe024965178bb29df1cbcfb0a47d0 Mon Sep 17 00:00:00 2001 From: jacklu Date: Wed, 15 May 2024 19:26:16 +0800 Subject: [PATCH 07/32] fix lint issue Signed-off-by: jacklu --- e2e/config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/e2e/config.yaml b/e2e/config.yaml index c6b78cfe0..8d41d6138 100644 --- a/e2e/config.yaml +++ b/e2e/config.yaml @@ -1,3 +1,4 @@ +--- channelname: "ramen-gitops" channelnamespace: "ramen-samples" giturl: "https://github.com/RamenDR/ocm-ramen-samples.git" From 54f59d5c80a880ee596c4a2736a4108fc4f8a2ca Mon Sep 17 00:00:00 2001 From: jacklu Date: Thu, 16 May 2024 15:49:21 +0800 Subject: [PATCH 08/32] use viper lib to read config in env and yaml Signed-off-by: jacklu --- e2e/go.mod | 20 ++++++++++++--- e2e/go.sum | 44 +++++++++++++++++++++++++++------ e2e/main_test.go | 4 +++ e2e/util/config.go | 60 +++++++++++++++++++++++++++++++++++++++++++++ e2e/util/const.go | 39 ----------------------------- e2e/util/yaml.go | 61 ---------------------------------------------- 6 files changed, 117 insertions(+), 111 deletions(-) create mode 100644 e2e/util/config.go delete mode 100644 e2e/util/yaml.go diff --git a/e2e/go.mod b/e2e/go.mod index b576d328d..6b1efbea2 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -5,6 +5,7 @@ go 1.21 require ( github.com/go-logr/logr v1.4.1 github.com/ramendr/ramen/api v0.0.0-00010101000000-000000000000 + github.com/spf13/viper v1.18.2 go.uber.org/zap v1.27.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.3 @@ -18,9 +19,10 @@ require ( ) require ( - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -31,26 +33,38 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.5.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/josharian/intern v1.0.0 // indirect 
github.com/json-iterator/go v1.1.12 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/component-base v0.29.3 // indirect k8s.io/klog/v2 v2.110.1 // indirect diff --git a/e2e/go.sum b/e2e/go.sum index 14278a39d..783d7dd16 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -4,14 +4,17 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 
h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -50,6 +53,8 @@ github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3 h1:2XF1Vzq06X+inNqgJ9 github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -65,11 +70,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -82,10 +91,13 @@ github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= @@ -96,8 +108,20 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0 h1:qL6eeBtdjLq7ktBBg8tB44b6jTKQjFy6bdl8EM+Kq6o= github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0/go.mod h1:uMTaz9cMLe5N+yJ/PpHPtSOdlBFB00WdxAW+K5TfkVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -109,6 +133,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -122,8 +148,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp 
v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -135,8 +161,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -160,8 +186,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -186,6 +212,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/e2e/main_test.go b/e2e/main_test.go index 4cb2263cc..1a0540e24 100644 --- a/e2e/main_test.go +++ b/e2e/main_test.go @@ -40,6 +40,10 @@ func TestMain(m 
*testing.M) {
 		panic(err)
 	}
 
+	if err := util.ReadConfig(); err != nil {
+		panic(err)
+	}
+
 	os.Exit(m.Run())
 }
 
diff --git a/e2e/util/config.go b/e2e/util/config.go
new file mode 100644
index 000000000..c31c03e44
--- /dev/null
+++ b/e2e/util/config.go
@@ -0,0 +1,60 @@
+// SPDX-FileCopyrightText: The RamenDR authors
+// SPDX-License-Identifier: Apache-2.0
+
+package util
+
+import (
+	"fmt"
+
+	"github.com/spf13/viper"
+)
+
+type TestConfig struct {
+	ChannelName      string
+	ChannelNamespace string
+	GitURL           string
+}
+
+var config = &TestConfig{}
+
+func ReadConfig() error {
+	viper.SetDefault("ChannelName", defaultChannelName)
+	viper.SetDefault("ChannelNamespace", defaultChannelNamespace)
+	viper.SetDefault("GitURL", defaultGitURL)
+
+	if err := viper.BindEnv("ChannelName", "ChannelName"); err != nil {
+		return err
+	}
+
+	if err := viper.BindEnv("ChannelNamespace", "ChannelNamespace"); err != nil {
+		return err
+	}
+
+	if err := viper.BindEnv("GitURL", "GitURL"); err != nil {
+		return err
+	}
+
+	viper.SetConfigFile("config.yaml")
+
+	if err := viper.ReadInConfig(); err != nil {
+		return fmt.Errorf("failed to read config: %v", err)
+	}
+
+	if err := viper.Unmarshal(config); err != nil {
+		return fmt.Errorf("failed to unmarshal config: %v", err)
+	}
+
+	return nil
+}
+
+func GetChannelName() string {
+	return config.ChannelName
+}
+
+func GetChannelNamespace() string {
+	return config.ChannelNamespace
+}
+
+func GetGitURL() string {
+	return config.GitURL
+}
diff --git a/e2e/util/const.go b/e2e/util/const.go
index 4ff22642a..37cc00871 100644
--- a/e2e/util/const.go
+++ b/e2e/util/const.go
@@ -3,10 +3,6 @@
 
 package util
 
-import (
-	"os"
-)
-
 const (
 	RamenSystemNamespace = "ramen-system"
 
@@ -17,38 +13,3 @@
 	defaultChannelNamespace = "ramen-samples"
 	defaultGitURL           = "https://github.com/RamenDR/ocm-ramen-samples.git"
 )
-
-func GetChannelName() string {
-	channelName, err := GetChannelNameFromYaml()
-	if err == nil && channelName != "" {
-		return channelName
-	}
-
-	return getEnv("ChannelName", defaultChannelName)
-}
-
-func GetChannelNamespace() string {
-	channelNamespace, err := GetChannelNamespaceFromYaml()
-	if err == nil && channelNamespace != "" {
-		return channelNamespace
-	}
-
-	return getEnv("ChannelNamespace", defaultChannelNamespace)
-}
-
-func GetGitURL() string {
-	gitURL, err := GetGitURLFromYaml()
-	if err == nil && gitURL != "" {
-		return gitURL
-	}
-
-	return getEnv("GitURL", defaultGitURL)
-}
-
-func getEnv(name, defaultValue string) string {
-	if value := os.Getenv(name); value != "" {
-		return value
-	}
-
-	return defaultValue
-}
diff --git a/e2e/util/yaml.go b/e2e/util/yaml.go
deleted file mode 100644
index 21de96cd6..000000000
--- a/e2e/util/yaml.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-FileCopyrightText: The RamenDR authors
-// SPDX-License-Identifier: Apache-2.0
-
-package util
-
-import (
-	"os"
-
-	"gopkg.in/yaml.v2"
-)
-
-type yamlConf struct {
-	ChannelName      string `yaml:"channelname,omitempty"`
-	ChannelNamespace string `yaml:"channelnamespace,omitempty"`
-	GitURL           string `yaml:"giturl,omitempty"`
-}
-
-const configFile = "config.yaml"
-
-func getConf() (yamlConf, error) {
-	cnf := &yamlConf{}
-
-	data, err := os.ReadFile(configFile)
-	if err != nil {
-		return *cnf, err
-	}
-
-	err = yaml.Unmarshal(data, cnf)
-	if err != nil {
-		return *cnf, err
-	}
-
-	return *cnf, nil
-}
-
-func GetChannelNameFromYaml() (string, error) {
-	cnf, err := getConf()
-	if err != nil {
-		return "", err
-	}
-
-	return cnf.ChannelName, nil
-}
-
-func GetChannelNamespaceFromYaml() (string, error) {
-	cnf, err := getConf()
-	if err != nil {
-		return "", err
-	}
-
-	return cnf.ChannelNamespace, nil
-}
-
-func GetGitURLFromYaml() (string, error) {
-	cnf, err := getConf()
-	if err != nil {
-		return "", err
-	}
-
-	return cnf.GitURL, nil
-}

From 24b60ac2167b7ac16ae162ad35ee6d7f6c468e90 Mon Sep 17 00:00:00 2001
From: jacklu
Date: Fri, 17 May 2024 22:14:15 +0800
Subject: [PATCH 09/32] wait for DRPC to be deleted

Signed-off-by: jacklu
---
 e2e/dractions/actions.go |  4 ++++
 e2e/dractions/retry.go   | 25 +++++++++++++++++++++++++
 2 files changed, 29 insertions(+)

diff --git a/e2e/dractions/actions.go b/e2e/dractions/actions.go
index dc0ad7a77..f2725f4d3 100644
--- a/e2e/dractions/actions.go
+++ b/e2e/dractions/actions.go
@@ -97,6 +97,10 @@ func DisableProtection(w workloads.Workload, d deployers.Deployer) error {
 		return err
 	}
 
+	if err := waitDRPCDeleted(client, namespace, drpcName); err != nil {
+		return err
+	}
+
 	util.Ctx.Log.Info("get placement " + placementName)
 
 	placement, err := getPlacement(client, namespace, placementName)
diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go
index a9941b6e7..efd88cc0b 100644
--- a/e2e/dractions/retry.go
+++ b/e2e/dractions/retry.go
@@ -9,6 +9,7 @@ import (
 	ramen "github.com/ramendr/ramen/api/v1alpha1"
 	"github.com/ramendr/ramen/e2e/util"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"open-cluster-management.io/api/cluster/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -173,3 +174,27 @@ func waitDRPC(client client.Client, namespace, name, expectedPhase string) error
 	// then check Conditions
 	return waitDRPCReady(client, namespace, name)
 }
+
+func waitDRPCDeleted(client client.Client, namespace string, name string) error {
+	startTime := time.Now()
+
+	for {
+		_, err := getDRPC(client, namespace, name)
+		if err != nil {
+			if errors.IsNotFound(err) {
+				util.Ctx.Log.Info("drpc " + name + " is deleted")
+
+				return nil
+			}
+
+			util.Ctx.Log.Info(fmt.Sprintf("failed to get drpc %s: %v", name, err))
+		}
+
+		if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
+			return fmt.Errorf("drpc %s is not deleted yet before timeout of %v", name, util.Timeout)
+		}
+
+		util.Ctx.Log.Info(fmt.Sprintf("drpc %s is not deleted yet, retry in %v seconds", name, util.TimeInterval))
+		time.Sleep(time.Second * time.Duration(util.TimeInterval))
+	}
+}

From d9816876bde7b0ca342652251453382a73ec7ca0 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Sat, 18 May 2024 00:14:47 +0300
Subject: [PATCH 10/32] Skip unneeded setup based on minikube version

Minikube v1.33.1 includes the fixes we added recently for v1.33.0, so we
don't need to set up or clean up anything.

We can remove the code and require users and developers to upgrade to the
latest version, but it is nicer to make this transparent and skip the
unneeded configuration. We can remove the special fixes for minikube
1.33.0 later, perhaps when 1.34 is released.
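The gate depends on the packaging library's PEP 440 parsing, which
accepts an optional leading "v", so minikube's "vX.Y.Z" strings compare
naturally. A minimal illustration of that assumption (not part of the
patch itself):

    from packaging.version import Version

    # "v1.33.1" normalizes to the same version as "1.33.1".
    assert Version("v1.33.1") == Version("1.33.1")
    assert Version("v1.33.1") >= Version("v1.33.0")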
Example run with minikube 1.33.1:

$ drenv setup -v
2024-05-18 00:19:54,127 INFO    [main] Setting up minikube for drenv
2024-05-18 00:19:54,152 DEBUG   [minikube] Using minikube version 1.33.1
2024-05-18 00:19:54,152 DEBUG   [minikube] Skipping sysctl configuration
2024-05-18 00:19:54,153 DEBUG   [minikube] Skipping systemd-resolved configuration

Signed-off-by: Nir Soffer
---
 test/drenv/minikube.py | 31 +++++++++++++++++++++++++++----
 test/setup.py          |  1 +
 2 files changed, 28 insertions(+), 4 deletions(-)

diff --git a/test/drenv/minikube.py b/test/drenv/minikube.py
index e7ac6349d..ad94c4afc 100644
--- a/test/drenv/minikube.py
+++ b/test/drenv/minikube.py
@@ -2,9 +2,12 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import errno
+import json
 import logging
 import os
 
+from packaging.version import Version
+
 from . import commands
 
 EXTRA_CONFIG = [
@@ -113,8 +116,10 @@ def setup_files():
     To load the configuration you must call load_files() after a cluster is
     created.
     """
-    _setup_sysctl()
-    _setup_systemd_resolved()
+    version = _version()
+    logging.debug("[minikube] Using minikube version %s", version)
+    _setup_sysctl(version)
+    _setup_systemd_resolved(version)
 
 
 def load_files(profile):
@@ -137,7 +142,17 @@ def cleanup_files():
     _cleanup_file(_sysctl_drenv_conf())
 
 
-def _setup_sysctl():
+def _version():
+    """
+    Get minikube version string ("v1.33.1") and return a
+    packaging.version.Version instance.
+    """
+    out = _run("version", output="json")
+    info = json.loads(out)
+    return Version(info["minikubeVersion"])
+
+
+def _setup_sysctl(version):
     """
     Increase fs.inotify limits to avoid random timeouts when starting kubevirt
     VM.
 
     We use the same configuration as OpenShift worker node. See also
     https://www.suse.com/support/kb/doc/?id=000020048
     """
+    if version >= Version("v1.33.1"):
+        logging.debug("[minikube] Skipping sysctl configuration")
+        return
+
     path = _sysctl_drenv_conf()
     data = """# Added by drenv setup
 fs.inotify.max_user_instances = 8192
@@ -165,7 +184,7 @@ def _sysctl_drenv_conf():
     return _minikube_file("etc", "sysctl.d", "99-drenv.conf")
 
 
-def _setup_systemd_resolved():
+def _setup_systemd_resolved(version):
     """
     Disable DNSSEC in systemd-resolved configuration.
 
     TODO: Remove when issue is fixed in minikube.
     """
+    if version >= Version("v1.33.1"):
+        logging.debug("[minikube] Skipping systemd-resolved configuration")
+        return
+
     path = _systemd_resolved_drenv_conf()
     data = """# Added by drenv setup
 [Resolve]
diff --git a/test/setup.py b/test/setup.py
index f409f3431..ae122d6d7 100644
--- a/test/setup.py
+++ b/test/setup.py
@@ -21,6 +21,7 @@
     install_requires=[
         "PyYAML",
         "toml",
+        "packaging",
     ],
     classifiers=[
         "Development Status :: 4 - Beta",

From 1cf25fbb196cf5ec844d19b5152dcd2717b82c3f Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Sat, 18 May 2024 00:23:18 +0300
Subject: [PATCH 11/32] Update minikube tested version to v1.33.1

Signed-off-by: Nir Soffer
---
 docs/user-quick-start.md | 2 +-
 test/README.md           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/user-quick-start.md b/docs/user-quick-start.md
index cfd6d818f..fedde1f11 100644
--- a/docs/user-quick-start.md
+++ b/docs/user-quick-start.md
@@ -97,7 +97,7 @@ enough resources:
    sudo dnf install https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm
    ```
 
-   Tested with version v1.33.0.
+   Tested with version v1.33.1.
 
1. 
Validate the installation diff --git a/test/README.md b/test/README.md index 2925db2bd..2174946b0 100644 --- a/test/README.md +++ b/test/README.md @@ -28,7 +28,7 @@ environment. ``` You need `minikube` version supporting the `--extra-disks` option. - Tested with version v1.33.0. + Tested with version v1.33.1. 1. Install the `kubectl` tool. See [Install and Set Up kubectl on Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) From a56be4d96b400198fc6cb1c02cd89e7c159e29d9 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Sat, 18 May 2024 00:26:54 +0300 Subject: [PATCH 12/32] Remove note about --extra-disk argument The note is correct but not helpful at this point. Let's drop unnecessary details like we did for docs/user-quick-start.md. Signed-off-by: Nir Soffer --- test/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/test/README.md b/test/README.md index 2174946b0..47c76b69e 100644 --- a/test/README.md +++ b/test/README.md @@ -27,7 +27,6 @@ environment. sudo dnf install https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm ``` - You need `minikube` version supporting the `--extra-disks` option. Tested with version v1.33.1. 1. Install the `kubectl` tool. See From e2b4d3b167a3672ebea58a5acd2d0e96ef93e358 Mon Sep 17 00:00:00 2001 From: Abhijeet Shakya Date: Tue, 14 May 2024 04:08:27 -0400 Subject: [PATCH 13/32] Upgrade olm version from v0.22.0 to v0.27.0 Fixes: #1260 Signed-off-by: Abhijeet Shakya --- test/addons/olm/start | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/addons/olm/start b/test/addons/olm/start index 11d6e7e0e..acdf2fca1 100755 --- a/test/addons/olm/start +++ b/test/addons/olm/start @@ -9,7 +9,7 @@ import sys import drenv from drenv import kubectl -VERSION = "v0.22.0" +VERSION = "v0.27.0" BASE_URL = f"https://github.com/operator-framework/operator-lifecycle-manager/releases/download/{VERSION}" NAMESPACE = "olm" From c159b763cffec427c42d51c781a412a648f18199 Mon Sep 17 00:00:00 2001 From: Abhijeet Shakya Date: Tue, 14 May 2024 04:40:29 -0400 Subject: [PATCH 14/32] Add single cluster env for olm Signed-off-by: Abhijeet Shakya --- test/README.md | 1 + test/envs/olm.yaml | 15 +++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 test/envs/olm.yaml diff --git a/test/README.md b/test/README.md index 47c76b69e..8965a6a49 100644 --- a/test/README.md +++ b/test/README.md @@ -651,6 +651,7 @@ simpler and faster to work with a minimal environment. - `kubevirt.yaml` - for testing kubevirt and cdi addons - `minio.yaml` - for testing `minio` deployment - `ocm.yaml` - for testing `ocm` deployment +- `olm.yaml` - for testing `olm` deployment - `rook.yaml` - for testing `rook` deployment - `submariner.yaml` - for testing `submariner` deployment - `velero.yaml` - for testing `velero` deployment diff --git a/test/envs/olm.yaml b/test/envs/olm.yaml new file mode 100644 index 000000000..fd3bc62c7 --- /dev/null +++ b/test/envs/olm.yaml @@ -0,0 +1,15 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +# Environment for testing olm deployment. 
+---
+name: olm
+
+profiles:
+  - name: c1
+    driver: $vm
+    container_runtime: containerd
+    memory: 2g
+    workers:
+      - addons:
+          - name: olm

From 08a376b0969f9cc0c27455b7d44b0dc4a7581767 Mon Sep 17 00:00:00 2001
From: Abhijeet Shakya
Date: Tue, 14 May 2024 04:42:19 -0400
Subject: [PATCH 15/32] Use static kustomization for olm

Signed-off-by: Abhijeet Shakya
---
 test/addons/olm/crds/kustomization.yaml      |  6 ++++++
 test/addons/olm/operators/kustomization.yaml |  6 ++++++
 test/addons/olm/start                        | 12 +++---------
 3 files changed, 15 insertions(+), 9 deletions(-)
 create mode 100644 test/addons/olm/crds/kustomization.yaml
 create mode 100644 test/addons/olm/operators/kustomization.yaml

diff --git a/test/addons/olm/crds/kustomization.yaml b/test/addons/olm/crds/kustomization.yaml
new file mode 100644
index 000000000..5b66f73a5
--- /dev/null
+++ b/test/addons/olm/crds/kustomization.yaml
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+---
+resources:
+  - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.27.0/crds.yaml
diff --git a/test/addons/olm/operators/kustomization.yaml b/test/addons/olm/operators/kustomization.yaml
new file mode 100644
index 000000000..c31d744b7
--- /dev/null
+++ b/test/addons/olm/operators/kustomization.yaml
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+---
+resources:
+  - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.27.0/olm.yaml
diff --git a/test/addons/olm/start b/test/addons/olm/start
index acdf2fca1..8a28f9a46 100755
--- a/test/addons/olm/start
+++ b/test/addons/olm/start
@@ -9,8 +9,6 @@ import sys
 
 import drenv
 from drenv import kubectl
 
-VERSION = "v0.27.0"
-BASE_URL = f"https://github.com/operator-framework/operator-lifecycle-manager/releases/download/{VERSION}"
 
 NAMESPACE = "olm"
 
@@ -21,21 +19,17 @@ def deploy(cluster):
     # The CustomResourceDefinition "clusterserviceversions.operators.coreos.com"
     # is invalid: metadata.annotations: Too long: must have at most 262144 bytes
     # See https://medium.com/pareture/kubectl-install-crd-failed-annotations-too-long-2ebc91b40c7d
-    kubectl.apply(
-        f"--filename={BASE_URL}/crds.yaml",
-        "--server-side=true",
-        context=cluster,
-    )
+    kubectl.apply("--kustomize", "crds", "--server-side=true", context=cluster)
 
     print("Waiting until cdrs are established")
     kubectl.wait(
         "--for=condition=established",
-        f"--filename={BASE_URL}/crds.yaml",
+        "--filename=https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.27.0/crds.yaml",
         context=cluster,
     )
 
     print("Deploying olm")
-    kubectl.apply("--filename", f"{BASE_URL}/olm.yaml", context=cluster)
+    kubectl.apply("--kustomize", "operators", context=cluster)
 
 
 def wait(cluster):

From ec2118a36ee80eb82f26cb6e43bff7ee65312917 Mon Sep 17 00:00:00 2001
From: Abhijeet Shakya
Date: Tue, 14 May 2024 05:14:13 -0400
Subject: [PATCH 16/32] Cache olm resources

This change will start using the cache for kustomization resources, so
starting the addon can directly use the cached resources.

Changes:
- drenv fetch can be used to fetch resources anytime.
- Starting an addon will first try to fetch resources, then apply the
  fetched resources. If there is no change, fetch won't do anything, so
  it takes very little time (see the sketch below).
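For example, an addon start script can consume the cache like this
(a sketch mirroring the pattern in this series; the "c1" context name
is only for illustration):

    from drenv import cache, kubectl

    # Resolve the cached file, refresh it only if the kustomization
    # output changed, then apply the local copy.
    path = cache.path("addons/olm-crds.yaml")
    cache.fetch("crds", path)
    kubectl.apply("--filename", path, "--server-side=true", context="c1")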
Fixes: #1337

Signed-off-by: Abhijeet Shakya
---
 test/addons/olm/fetch | 15 +++++++++++++++
 test/addons/olm/start | 12 +++++++++---
 2 files changed, 24 insertions(+), 3 deletions(-)
 create mode 100755 test/addons/olm/fetch

diff --git a/test/addons/olm/fetch b/test/addons/olm/fetch
new file mode 100755
index 000000000..b67c38238
--- /dev/null
+++ b/test/addons/olm/fetch
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+# SPDX-FileCopyrightText: The RamenDR authors
+# SPDX-License-Identifier: Apache-2.0
+
+import os
+
+from drenv import cache
+
+os.chdir(os.path.dirname(__file__))
+path = cache.path("addons/olm-crds.yaml")
+cache.fetch("crds", path)
+
+path = cache.path("addons/olm-operators.yaml")
+cache.fetch("operators", path)
diff --git a/test/addons/olm/start b/test/addons/olm/start
index 8a28f9a46..d7e5fb3e6 100755
--- a/test/addons/olm/start
+++ b/test/addons/olm/start
@@ -8,6 +8,7 @@ import sys
 
 import drenv
 from drenv import kubectl
+from drenv import cache
 
 NAMESPACE = "olm"
 
@@ -19,17 +20,22 @@ def deploy(cluster):
     # The CustomResourceDefinition "clusterserviceversions.operators.coreos.com"
     # is invalid: metadata.annotations: Too long: must have at most 262144 bytes
     # See https://medium.com/pareture/kubectl-install-crd-failed-annotations-too-long-2ebc91b40c7d
-    kubectl.apply("--kustomize", "crds", "--server-side=true", context=cluster)
+    path = cache.path("addons/olm-crds.yaml")
+    cache.fetch("crds", path)
+    kubectl.apply("--filename", path, "--server-side=true", context=cluster)
 
     print("Waiting until cdrs are established")
     kubectl.wait(
         "--for=condition=established",
-        "--filename=https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.27.0/crds.yaml",
+        "--filename",
+        path,
         context=cluster,
     )
 
     print("Deploying olm")
-    kubectl.apply("--kustomize", "operators", context=cluster)
+    path = cache.path("addons/olm-operators.yaml")
+    cache.fetch("operators", path)
+    kubectl.apply("--filename", path, context=cluster)
 
 
 def wait(cluster):

From 463cfcf487b199db3842431ebd21a9d5a8c398b2 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Mon, 20 May 2024 22:31:22 +0300
Subject: [PATCH 17/32] Downgrade olm back to 0.22

Since we upgraded, the e2e job is failing[1] (due to a bug in the e2e
integration, the job does not fail!). Let's try to go back to olm 0.22
since we know it worked before this change[2].
[1] last good build: https://github.com/RamenDR/ramen/actions/runs/9134579476/job/25120395289 [2] first bad build: https://github.com/RamenDR/ramen/actions/runs/9158274985/job/25177239838 Signed-off-by: Nir Soffer --- test/addons/olm/crds/kustomization.yaml | 2 +- test/addons/olm/operators/kustomization.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/addons/olm/crds/kustomization.yaml b/test/addons/olm/crds/kustomization.yaml index 5b66f73a5..a70db1faa 100644 --- a/test/addons/olm/crds/kustomization.yaml +++ b/test/addons/olm/crds/kustomization.yaml @@ -3,4 +3,4 @@ --- resources: - - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.27.0/crds.yaml + - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/crds.yaml diff --git a/test/addons/olm/operators/kustomization.yaml b/test/addons/olm/operators/kustomization.yaml index c31d744b7..ebe0b7b87 100644 --- a/test/addons/olm/operators/kustomization.yaml +++ b/test/addons/olm/operators/kustomization.yaml @@ -3,4 +3,4 @@ --- resources: - - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.27.0/olm.yaml + - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/olm.yaml From b5f89360b8c091e63957071028f1c04d30121771 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Mon, 13 May 2024 15:49:11 -0400 Subject: [PATCH 18/32] util: add ProtectedPVCNamespacedName func Signed-off-by: Raghavendra Talur --- controllers/util/objectmeta.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/controllers/util/objectmeta.go b/controllers/util/objectmeta.go index c84e448b3..ed71dfb41 100644 --- a/controllers/util/objectmeta.go +++ b/controllers/util/objectmeta.go @@ -4,7 +4,9 @@ package util import ( + ramen "github.com/ramendr/ramen/api/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -23,3 +25,10 @@ func ObjectMetaEmbedded(objectMeta *metav1.ObjectMeta) metav1.ObjectMeta { func ResourceIsDeleted(obj client.Object) bool { return !obj.GetDeletionTimestamp().IsZero() } + +func ProtectedPVCNamespacedName(pvc ramen.ProtectedPVC) types.NamespacedName { + return types.NamespacedName{ + Namespace: pvc.Namespace, + Name: pvc.Name, + } +} From 2226128c3d24e08aedbcbd099f779e3e3c434c28 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Mon, 13 May 2024 04:59:50 -0400 Subject: [PATCH 19/32] vrg: add adminNamespaceVRG field to vshandler Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler.go | 3 +++ controllers/volumereplicationgroup_controller.go | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/controllers/volsync/vshandler.go b/controllers/volsync/vshandler.go index 96d94e237..740cdeb87 100644 --- a/controllers/volsync/vshandler.go +++ b/controllers/volsync/vshandler.go @@ -70,10 +70,12 @@ type VSHandler struct { defaultCephFSCSIDriverName string destinationCopyMethod volsyncv1alpha1.CopyMethodType volumeSnapshotClassList *snapv1.VolumeSnapshotClassList + vrgInAdminNamespace bool } func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, owner metav1.Object, asyncSpec *ramendrv1alpha1.VRGAsyncSpec, defaultCephFSCSIDriverName string, copyMethod string, + adminNamespaceVRG bool, ) *VSHandler { vsHandler := &VSHandler{ ctx: ctx, @@ -83,6 +85,7 @@ func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, ow 
defaultCephFSCSIDriverName: defaultCephFSCSIDriverName, destinationCopyMethod: volsyncv1alpha1.CopyMethodType(copyMethod), volumeSnapshotClassList: nil, // Do not initialize until we need it + vrgInAdminNamespace: adminNamespaceVRG, } if asyncSpec != nil { diff --git a/controllers/volumereplicationgroup_controller.go b/controllers/volumereplicationgroup_controller.go index 464374f0f..d9e1d9290 100644 --- a/controllers/volumereplicationgroup_controller.go +++ b/controllers/volumereplicationgroup_controller.go @@ -427,9 +427,11 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct } v.ramenConfig = ramenConfig + adminNamespaceVRG := vrgInAdminNamespace(v.instance, v.ramenConfig) + v.volSyncHandler = volsync.NewVSHandler(ctx, r.Client, log, v.instance, v.instance.Spec.Async, cephFSCSIDriverNameOrDefault(v.ramenConfig), - volSyncDestinationCopyMethodOrDefault(v.ramenConfig)) + volSyncDestinationCopyMethodOrDefault(v.ramenConfig), adminNamespaceVRG) if v.instance.Status.ProtectedPVCs == nil { v.instance.Status.ProtectedPVCs = []ramendrv1alpha1.ProtectedPVC{} From edb3f16ed586a835108977767b04709e553643fa Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Mon, 13 May 2024 05:17:43 -0400 Subject: [PATCH 20/32] vrg: use FindProtectedPVC and remove findFirstProtectedPVCWithName This is required because we can have two PVCs with the same name when multinamespace support is enabled. Signed-off-by: Raghavendra Talur --- controllers/vrg_status_pvcs.go | 11 ----------- controllers/vrg_volsync.go | 6 +++--- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/controllers/vrg_status_pvcs.go b/controllers/vrg_status_pvcs.go index cb08e10e9..1ee53c0e7 100644 --- a/controllers/vrg_status_pvcs.go +++ b/controllers/vrg_status_pvcs.go @@ -50,17 +50,6 @@ func FindProtectedPvcAndIndex( return nil, len(vrg.Status.ProtectedPVCs) } -func (v *VRGInstance) findFirstProtectedPVCWithName(pvcName string) *ramen.ProtectedPVC { - for index := range v.instance.Status.ProtectedPVCs { - protectedPVC := &v.instance.Status.ProtectedPVCs[index] - if protectedPVC.Name == pvcName { - return protectedPVC - } - } - - return nil -} - func (v *VRGInstance) vrgStatusPvcNamespacesSetIfUnset() { vrg := v.instance diff --git a/controllers/vrg_volsync.go b/controllers/vrg_volsync.go index cc2e2c87d..d594bc3ab 100644 --- a/controllers/vrg_volsync.go +++ b/controllers/vrg_volsync.go @@ -33,7 +33,7 @@ func (v *VRGInstance) restorePVsAndPVCsForVolSync() (int, error) { if err != nil { v.log.Info(fmt.Sprintf("Unable to ensure PVC %v -- err: %v", rdSpec, err)) - protectedPVC := v.findFirstProtectedPVCWithName(rdSpec.ProtectedPVC.Name) + protectedPVC := FindProtectedPVC(v.instance, rdSpec.ProtectedPVC.Namespace, rdSpec.ProtectedPVC.Name) if protectedPVC == nil { protectedPVC = &ramendrv1alpha1.ProtectedPVC{} rdSpec.ProtectedPVC.DeepCopyInto(protectedPVC) @@ -48,7 +48,7 @@ func (v *VRGInstance) restorePVsAndPVCsForVolSync() (int, error) { numPVsRestored++ - protectedPVC := v.findFirstProtectedPVCWithName(rdSpec.ProtectedPVC.Name) + protectedPVC := FindProtectedPVC(v.instance, rdSpec.ProtectedPVC.Namespace, rdSpec.ProtectedPVC.Name) if protectedPVC == nil { protectedPVC = &ramendrv1alpha1.ProtectedPVC{} rdSpec.ProtectedPVC.DeepCopyInto(protectedPVC) @@ -123,7 +123,7 @@ func (v *VRGInstance) reconcilePVCAsVolSyncPrimary(pvc corev1.PersistentVolumeCl Resources: pvc.Spec.Resources, } - protectedPVC := v.findFirstProtectedPVCWithName(pvc.Name) + protectedPVC := FindProtectedPVC(v.instance, pvc.Namespace, 
pvc.Name) if protectedPVC == nil { protectedPVC = newProtectedPVC v.instance.Status.ProtectedPVCs = append(v.instance.Status.ProtectedPVCs, *protectedPVC) From 055e8664416443251510615fe688a7bee531b443 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Mon, 13 May 2024 05:28:32 -0400 Subject: [PATCH 21/32] vrg: send pvcNamespacedName everywhere instead of just name Also, create the rd and rs in the same namespace as the PVC and not VRG. Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler.go | 143 +++++++++++++++---------------- controllers/vrg_volsync.go | 6 +- 2 files changed, 74 insertions(+), 75 deletions(-) diff --git a/controllers/volsync/vshandler.go b/controllers/volsync/vshandler.go index 740cdeb87..632bf8771 100644 --- a/controllers/volsync/vshandler.go +++ b/controllers/volsync/vshandler.go @@ -186,7 +186,7 @@ func (v *VSHandler) createOrUpdateRD( rd := &volsyncv1alpha1.ReplicationDestination{ ObjectMeta: metav1.ObjectMeta{ Name: getReplicationDestinationName(rdSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -226,14 +226,10 @@ func (v *VSHandler) createOrUpdateRD( return rd, nil } -func (v *VSHandler) isPVCInUseByNonRDPod(pvcName string) (bool, error) { +func (v *VSHandler) isPVCInUseByNonRDPod(pvcNamespacedName types.NamespacedName) (bool, error) { rd := &volsyncv1alpha1.ReplicationDestination{} - err := v.client.Get(v.ctx, - types.NamespacedName{ - Name: getReplicationDestinationName(pvcName), - Namespace: v.owner.GetNamespace(), - }, rd) + err := v.client.Get(v.ctx, pvcNamespacedName, rd) // IF RD is Found, then no more checks are needed. We'll assume that the RD // was created when the PVC was Not in use. @@ -244,7 +240,7 @@ func (v *VSHandler) isPVCInUseByNonRDPod(pvcName string) (bool, error) { } // PVC must not be in use - pvcInUse, err := v.pvcExistsAndInUse(pvcName, false) + pvcInUse, err := v.pvcExistsAndInUse(pvcNamespacedName, false) if err != nil { return false, err } @@ -295,7 +291,7 @@ func (v *VSHandler) ReconcileRS(rsSpec ramendrv1alpha1.VolSyncReplicationSourceS pvcOk, err := v.validatePVCBeforeRS(rsSpec, runFinalSync) if !pvcOk || err != nil { // Return the replicationSource if it already exists - existingRS, getRSErr := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name)) + existingRS, getRSErr := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name), rsSpec.ProtectedPVC.Namespace) if getRSErr != nil { return false, nil, err } @@ -336,7 +332,7 @@ func (v *VSHandler) validatePVCBeforeRS(rsSpec ramendrv1alpha1.VolSyncReplicatio if runFinalSync { // If runFinalSync, check the PVC and make sure it's not mounted to a pod // as we want the app to be quiesced/removed before running final sync - pvcIsMounted, err := v.pvcExistsAndInUse(rsSpec.ProtectedPVC.Name, false) + pvcIsMounted, err := v.pvcExistsAndInUse(util.ProtectedPVCNamespacedName(rsSpec.ProtectedPVC), false) if err != nil { return false, err } @@ -351,12 +347,13 @@ func (v *VSHandler) validatePVCBeforeRS(rsSpec ramendrv1alpha1.VolSyncReplicatio // Not running final sync - if we have not yet created an RS for this PVC, then make sure a pod has mounted // the PVC and is in "Running" state before attempting to create an RS. // This is a best effort to confirm the app that is using the PVC is started before trying to replicate the PVC. 
- _, err := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name)) + _, err := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name), rsSpec.ProtectedPVC.Namespace) if err != nil && kerrors.IsNotFound(err) { l.Info("ReplicationSource does not exist yet. " + "validating that the PVC to be protected is in use by a ready pod ...") // RS does not yet exist - consider PVC is ok if it's mounted and in use by running pod - inUseByReadyPod, err := v.pvcExistsAndInUse(rsSpec.ProtectedPVC.Name, true /* Check mounting pod is Ready */) + inUseByReadyPod, err := v.pvcExistsAndInUse(util.ProtectedPVCNamespacedName(rsSpec.ProtectedPVC), + true /* Check mounting pod is Ready */) if err != nil { return false, err } @@ -403,7 +400,7 @@ func (v *VSHandler) cleanupAfterRSFinalSync(rsSpec ramendrv1alpha1.VolSyncReplic v.log.Info("Cleanup after final sync", "pvcName", rsSpec.ProtectedPVC.Name) - return util.DeletePVC(v.ctx, v.client, rsSpec.ProtectedPVC.Name, v.owner.GetNamespace(), v.log) + return util.DeletePVC(v.ctx, v.client, rsSpec.ProtectedPVC.Name, rsSpec.ProtectedPVC.Namespace, v.log) } //nolint:funlen @@ -430,12 +427,12 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo // Remote service address created for the ReplicationDestination on the secondary // The secondary namespace will be the same as primary namespace so use the vrg.Namespace - remoteAddress := getRemoteServiceNameForRDFromPVCName(rsSpec.ProtectedPVC.Name, v.owner.GetNamespace()) + remoteAddress := getRemoteServiceNameForRDFromPVCName(rsSpec.ProtectedPVC.Name, rsSpec.ProtectedPVC.Namespace) rs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getReplicationSourceName(rsSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rsSpec.ProtectedPVC.Namespace, }, } @@ -495,9 +492,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo return rs, nil } -func (v *VSHandler) PreparePVC(pvcName string, prepFinalSync, copyMethodDirect bool) error { +func (v *VSHandler) PreparePVC(pvcNamespacedName types.NamespacedName, prepFinalSync, copyMethodDirect bool) error { if prepFinalSync || copyMethodDirect { - prepared, err := v.TakePVCOwnership(pvcName) + prepared, err := v.TakePVCOwnership(pvcNamespacedName) if err != nil || !prepared { return fmt.Errorf("waiting to take pvc ownership (%w), prepFinalSync: %t, Direct: %t", err, prepFinalSync, copyMethodDirect) @@ -509,11 +506,11 @@ func (v *VSHandler) PreparePVC(pvcName string, prepFinalSync, copyMethodDirect b // TakePVCOwnership adds do-not-delete annotation to indicate that ACM should not delete/cleanup this pvc // when the appsub is removed and adds VRG as owner so the PVC is garbage collected when the VRG is deleted. 
-func (v *VSHandler) TakePVCOwnership(pvcName string) (bool, error) { - l := v.log.WithValues("pvcName", pvcName) +func (v *VSHandler) TakePVCOwnership(pvcNamespacedName types.NamespacedName) (bool, error) { + l := v.log.WithValues("pvc", pvcNamespacedName) // Confirm PVC exists and add our VRG as ownerRef - pvc, err := v.validatePVCAndAddVRGOwnerRef(pvcName) + pvc, err := v.validatePVCAndAddVRGOwnerRef(pvcNamespacedName) if err != nil { l.Error(err, "unable to validate PVC or add ownership") @@ -534,11 +531,10 @@ func (v *VSHandler) TakePVCOwnership(pvcName string) (bool, error) { // If inUsePodMustBeReady is true, will only return true if the pod mounting the PVC is in Ready state // If inUsePodMustBeReady is false, will run an additional volume attachment check to make sure the PV underlying // the PVC is really detached (i.e. I/O operations complete) and therefore we can assume, quiesced. -func (v *VSHandler) pvcExistsAndInUse(pvcName string, inUsePodMustBeReady bool) (bool, error) { - pvcNamespacedName := types.NamespacedName{Namespace: v.owner.GetNamespace(), Name: pvcName} +func (v *VSHandler) pvcExistsAndInUse(pvcNamespacedName types.NamespacedName, inUsePodMustBeReady bool) (bool, error) { log := v.log.WithValues("pvc", pvcNamespacedName.String()) - pvc, err := v.getPVC(pvcName) + pvc, err := v.getPVC(pvcNamespacedName) if err != nil { if kerrors.IsNotFound(err) { log.Info("PVC not found") @@ -561,14 +557,10 @@ func (v *VSHandler) pvcExistsAndInUse(pvcName string, inUsePodMustBeReady bool) return util.IsPVAttachedToNode(v.ctx, v.client, v.log, pvc) } -func (v *VSHandler) getPVC(pvcName string) (*corev1.PersistentVolumeClaim, error) { +func (v *VSHandler) getPVC(pvcNamespacedName types.NamespacedName) (*corev1.PersistentVolumeClaim, error) { pvc := &corev1.PersistentVolumeClaim{} - err := v.client.Get(v.ctx, - types.NamespacedName{ - Name: pvcName, - Namespace: v.owner.GetNamespace(), - }, pvc) + err := v.client.Get(v.ctx, pvcNamespacedName, pvc) if err != nil { return nil, fmt.Errorf("%w", err) } @@ -578,13 +570,15 @@ func (v *VSHandler) getPVC(pvcName string) (*corev1.PersistentVolumeClaim, error // Adds owner ref and ACM "do-not-delete" annotation to indicate that when the appsub is removed, ACM // should not cleanup this PVC - we want it left behind so we can run a final sync -func (v *VSHandler) validatePVCAndAddVRGOwnerRef(pvcName string) (*corev1.PersistentVolumeClaim, error) { - pvc, err := v.getPVC(pvcName) +func (v *VSHandler) validatePVCAndAddVRGOwnerRef(pvcNamespacedName types.NamespacedName) ( + *corev1.PersistentVolumeClaim, error, +) { + pvc, err := v.getPVC(pvcNamespacedName) if err != nil { return nil, err } - v.log.Info("PVC exists", "pvcName", pvcName) + v.log.Info("PVC exists", "pvcName", pvcNamespacedName.Name, "pvcNamespaceName", pvcNamespacedName.Namespace) // Add annotation to indicate that ACM should not delete/cleanup this pvc when the appsub is removed // and add VRG as owner @@ -593,7 +587,7 @@ func (v *VSHandler) validatePVCAndAddVRGOwnerRef(pvcName string) (*corev1.Persis return nil, err } - v.log.V(1).Info("PVC validated", "pvc name", pvcName) + v.log.V(1).Info("PVC validated", "pvcName", pvcNamespacedName.Name, "pvcNamespaceName", pvcNamespacedName.Namespace) return pvc, nil } @@ -633,13 +627,13 @@ func (v *VSHandler) validateSecretAndAddVRGOwnerRef(secretName string) (bool, er return true, nil } -func (v *VSHandler) getRS(name string) (*volsyncv1alpha1.ReplicationSource, error) { +func (v *VSHandler) getRS(name, namespace string) 
(*volsyncv1alpha1.ReplicationSource, error) { rs := &volsyncv1alpha1.ReplicationSource{} err := v.client.Get(v.ctx, types.NamespacedName{ Name: name, - Namespace: v.owner.GetNamespace(), + Namespace: namespace, }, rs) if err != nil { return nil, fmt.Errorf("%w", err) @@ -703,7 +697,7 @@ func (v *VSHandler) DeleteRD(pvcName string) error { //nolint:gocognit func (v *VSHandler) deleteLocalRDAndRS(rd *volsyncv1alpha1.ReplicationDestination) error { - latestRDImage, err := v.getRDLatestImage(rd.GetName()) + latestRDImage, err := v.getRDLatestImage(rd.GetName(), rd.GetNamespace()) if err != nil { return err } @@ -713,7 +707,7 @@ func (v *VSHandler) deleteLocalRDAndRS(rd *volsyncv1alpha1.ReplicationDestinatio lrs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rd.GetName()), - Namespace: v.owner.GetNamespace(), + Namespace: rd.GetNamespace(), }, } @@ -723,7 +717,7 @@ func (v *VSHandler) deleteLocalRDAndRS(rd *volsyncv1alpha1.ReplicationDestinatio }, lrs) if err != nil { if kerrors.IsNotFound(err) { - return v.deleteLocalRD(getLocalReplicationName(rd.GetName())) + return v.deleteLocalRD(getLocalReplicationName(rd.GetName()), rd.GetNamespace()) } return err @@ -875,7 +869,7 @@ func (v *VSHandler) listByOwner(list client.ObjectList) error { func (v *VSHandler) EnsurePVCfromRD(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, failoverAction bool, ) error { - latestImage, err := v.getRDLatestImage(rdSpec.ProtectedPVC.Name) + latestImage, err := v.getRDLatestImage(rdSpec.ProtectedPVC.Name, rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -913,7 +907,7 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, return fmt.Errorf("capacity must be provided %v", rdSpec.ProtectedPVC) } - pvc, err := v.getPVC(rdSpec.ProtectedPVC.Name) + pvc, err := v.getPVC(util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) if err != nil && !kerrors.IsNotFound(err) { return err } @@ -925,7 +919,7 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: rdSpec.ProtectedPVC.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -966,7 +960,7 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, func (v *VSHandler) validateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, snapshotRef corev1.TypedLocalObjectReference, failoverAction bool, ) error { - snap, err := v.validateAndProtectSnapshot(snapshotRef) + snap, err := v.validateAndProtectSnapshot(snapshotRef, rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -979,7 +973,7 @@ func (v *VSHandler) validateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncR pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: rdSpec.ProtectedPVC.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -1010,7 +1004,7 @@ func (v *VSHandler) validateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncR } } - pvc, err := v.getPVC(rdSpec.ProtectedPVC.Name) + pvc, err := v.getPVC(util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) if err != nil { return err } @@ -1031,7 +1025,7 @@ func (v *VSHandler) rollbackToLastSnapshot(rdSpec ramendrv1alpha1.VolSyncReplica v.log.Info(fmt.Sprintf("Rollback to the last snapshot %s for pvc %s", snapshotRef.Name, rdSpec.ProtectedPVC.Name)) // 1. Pause the main RD. Any inprogress sync will be terminated. 
- rd, err := v.pauseRD(getReplicationDestinationName(rdSpec.ProtectedPVC.Name)) + rd, err := v.pauseRD(getReplicationDestinationName(rdSpec.ProtectedPVC.Name), rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -1042,7 +1036,7 @@ func (v *VSHandler) rollbackToLastSnapshot(rdSpec ramendrv1alpha1.VolSyncReplica return err } - lrd, err := v.getRD(getLocalReplicationName(rdSpec.ProtectedPVC.Name)) + lrd, err := v.getRD(getLocalReplicationName(rdSpec.ProtectedPVC.Name), rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -1067,7 +1061,7 @@ func (v *VSHandler) rollbackToLastSnapshot(rdSpec ramendrv1alpha1.VolSyncReplica // Now pause LocalRD so that a new pod does not start and uses the PVC. // At this point, we want only the app to use the PVC. - _, err = v.pauseRD(lrd.GetName()) + _, err = v.pauseRD(lrd.GetName(), lrd.GetNamespace()) if err != nil { return err } @@ -1090,7 +1084,7 @@ func (v *VSHandler) ensurePVCFromSnapshot(rdSpec ramendrv1alpha1.VolSyncReplicat pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: rdSpec.ProtectedPVC.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -1169,12 +1163,13 @@ func (v *VSHandler) ensurePVCFromSnapshot(rdSpec ramendrv1alpha1.VolSyncReplicat // adds VolSync "do-not-delete" label to indicate volsync should not cleanup this snapshot func (v *VSHandler) validateAndProtectSnapshot( volumeSnapshotRef corev1.TypedLocalObjectReference, + volumeSnapshotNamespace string, ) (*snapv1.VolumeSnapshot, error) { volSnap := &snapv1.VolumeSnapshot{} err := v.client.Get(v.ctx, types.NamespacedName{ Name: volumeSnapshotRef.Name, - Namespace: v.owner.GetNamespace(), + Namespace: volumeSnapshotNamespace, }, volSnap) if err != nil { v.log.Error(err, "Unable to get VolumeSnapshot", "volumeSnapshotRef", volumeSnapshotRef) @@ -1462,7 +1457,7 @@ func ConvertSchedulingIntervalToCronSpec(schedulingInterval string) (*string, er return &cronSpec, nil } -func (v *VSHandler) IsRSDataProtected(pvcName string) (bool, error) { +func (v *VSHandler) IsRSDataProtected(pvcName, pvcNamespace string) (bool, error) { l := v.log.WithValues("pvcName", pvcName) // Get RD instance @@ -1471,7 +1466,7 @@ func (v *VSHandler) IsRSDataProtected(pvcName string) (bool, error) { err := v.client.Get(v.ctx, types.NamespacedName{ Name: getReplicationSourceName(pvcName), - Namespace: v.owner.GetNamespace(), + Namespace: pvcNamespace, }, rs) if err != nil { if !kerrors.IsNotFound(err) { @@ -1496,8 +1491,8 @@ func isRSLastSyncTimeReady(rsStatus *volsyncv1alpha1.ReplicationSourceStatus) bo return false } -func (v *VSHandler) getRDLatestImage(pvcName string) (*corev1.TypedLocalObjectReference, error) { - rd, err := v.getRD(pvcName) +func (v *VSHandler) getRDLatestImage(pvcName, pvcNamespace string) (*corev1.TypedLocalObjectReference, error) { + rd, err := v.getRD(pvcName, pvcNamespace) if err != nil || rd == nil { return nil, err } @@ -1511,8 +1506,8 @@ func (v *VSHandler) getRDLatestImage(pvcName string) (*corev1.TypedLocalObjectRe } // Returns true if at least one sync has completed (we'll consider this "data protected") -func (v *VSHandler) IsRDDataProtected(pvcName string) (bool, error) { - latestImage, err := v.getRDLatestImage(pvcName) +func (v *VSHandler) IsRDDataProtected(pvcName, pvcNamespace string) (bool, error) { + latestImage, err := v.getRDLatestImage(pvcName, pvcNamespace) if err != nil { return false, err } @@ -1535,7 +1530,7 @@ func (v *VSHandler) PrecreateDestPVCIfEnabled(rdSpec ramendrv1alpha1.VolSyncRepl 
} // PVC must not be in-use before creating the RD - inUse, err := v.isPVCInUseByNonRDPod(rdSpec.ProtectedPVC.Name) + inUse, err := v.isPVCInUseByNonRDPod(util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) if err != nil { return nil, err } @@ -1544,10 +1539,12 @@ func (v *VSHandler) PrecreateDestPVCIfEnabled(rdSpec ramendrv1alpha1.VolSyncRepl // on this cluster). That race condition will be ignored. That would be a user error to deploy the // same app in the same namespace and on the destination cluster... if inUse { - return nil, fmt.Errorf("pvc %v is mounted by others. Checking later", rdSpec.ProtectedPVC.Name) + return nil, fmt.Errorf("pvc %v is mounted by others. Checking later", + util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) } - v.log.Info(fmt.Sprintf("Using App PVC %s for syncing directly to it", rdSpec.ProtectedPVC.Name)) + v.log.Info(fmt.Sprintf("Using App PVC %s for syncing directly to it", + util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC))) // Using the application PVC for syncing from source to destination and save a snapshot // everytime a sync is successful return &rdSpec.ProtectedPVC.Name, nil @@ -1661,7 +1658,7 @@ func (v *VSHandler) reconcileLocalRD(rdSpec ramendrv1alpha1.VolSyncReplicationDe lrd := &volsyncv1alpha1.ReplicationDestination{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rdSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -1742,7 +1739,7 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, lrs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rsSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rsSpec.ProtectedPVC.Namespace, }, } @@ -1784,13 +1781,13 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, func (v *VSHandler) cleanupLocalResources(lrs *volsyncv1alpha1.ReplicationSource) error { // delete the snapshot taken by local RD - err := v.deleteSnapshot(v.ctx, v.client, lrs.Spec.Trigger.Manual, v.owner.GetNamespace(), v.log) + err := v.deleteSnapshot(v.ctx, v.client, lrs.Spec.Trigger.Manual, lrs.GetNamespace(), v.log) if err != nil { return err } // delete RO PVC created for localRS - err = util.DeletePVC(v.ctx, v.client, lrs.Spec.SourcePVC, v.owner.GetNamespace(), v.log) + err = util.DeletePVC(v.ctx, v.client, lrs.Spec.SourcePVC, lrs.GetNamespace(), v.log) if err != nil { return err } @@ -1801,14 +1798,14 @@ func (v *VSHandler) cleanupLocalResources(lrs *volsyncv1alpha1.ReplicationSource } // Delete the localRD. 
The name of the localRD is the same as the name of the localRS - return v.deleteLocalRD(lrs.GetName()) + return v.deleteLocalRD(lrs.GetName(), lrs.GetNamespace()) } -func (v *VSHandler) deleteLocalRD(lrdName string) error { +func (v *VSHandler) deleteLocalRD(lrdName, lrdNamespace string) error { lrd := &volsyncv1alpha1.ReplicationDestination{ ObjectMeta: metav1.ObjectMeta{ Name: lrdName, - Namespace: v.owner.GetNamespace(), + Namespace: lrdNamespace, }, } @@ -1829,7 +1826,7 @@ func (v *VSHandler) deleteLocalRD(lrdName string) error { func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, ) (*corev1.PersistentVolumeClaim, error) { - latestImage, err := v.getRDLatestImage(rd.GetName()) + latestImage, err := v.getRDLatestImage(rd.GetName(), rd.GetNamespace()) if err != nil { return nil, err } @@ -1853,7 +1850,7 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, lrs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rd.GetName()), - Namespace: v.owner.GetNamespace(), + Namespace: rd.GetNamespace(), }, } @@ -1869,7 +1866,7 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, } } - snap, err := v.validateAndProtectSnapshot(*vsImageRef) + snap, err := v.validateAndProtectSnapshot(*vsImageRef, lrs.Namespace) if err != nil { return nil, err } @@ -1893,7 +1890,7 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: snapshotRef.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rd.GetNamespace(), }, } @@ -1975,7 +1972,7 @@ func (v *VSHandler) deleteSnapshot(ctx context.Context, return nil } -func (v *VSHandler) getRD(pvcName string) (*volsyncv1alpha1.ReplicationDestination, error) { +func (v *VSHandler) getRD(pvcName, pvcNamespace string) (*volsyncv1alpha1.ReplicationDestination, error) { l := v.log.WithValues("pvcName", pvcName) // Get RD instance @@ -1984,7 +1981,7 @@ func (v *VSHandler) getRD(pvcName string) (*volsyncv1alpha1.ReplicationDestinati err := v.client.Get(v.ctx, types.NamespacedName{ Name: getReplicationDestinationName(pvcName), - Namespace: v.owner.GetNamespace(), + Namespace: pvcNamespace, }, rdInst) if err != nil { if !kerrors.IsNotFound(err) { @@ -2001,8 +1998,8 @@ func (v *VSHandler) getRD(pvcName string) (*volsyncv1alpha1.ReplicationDestinati return rdInst, nil } -func (v *VSHandler) pauseRD(rdName string) (*volsyncv1alpha1.ReplicationDestination, error) { - rd, err := v.getRD(rdName) +func (v *VSHandler) pauseRD(rdName, rdNamespace string) (*volsyncv1alpha1.ReplicationDestination, error) { + rd, err := v.getRD(rdName, rdNamespace) if err != nil || rd == nil { return nil, err } diff --git a/controllers/vrg_volsync.go b/controllers/vrg_volsync.go index d594bc3ab..22c65e1e1 100644 --- a/controllers/vrg_volsync.go +++ b/controllers/vrg_volsync.go @@ -10,6 +10,7 @@ import ( "github.com/go-logr/logr" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/controllers/util" "github.com/ramendr/ramen/controllers/volsync" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -138,7 +139,8 @@ func (v *VRGInstance) reconcilePVCAsVolSyncPrimary(pvc corev1.PersistentVolumeCl ProtectedPVC: *protectedPVC, } - err := v.volSyncHandler.PreparePVC(pvc.Name, v.instance.Spec.PrepareForFinalSync, + err := v.volSyncHandler.PreparePVC(util.ProtectedPVCNamespacedName(*protectedPVC), + v.instance.Spec.PrepareForFinalSync, 
v.volSyncHandler.IsCopyMethodDirect()) if err != nil { return true @@ -321,7 +323,7 @@ func (v *VRGInstance) buildDataProtectedCondition() *metav1.Condition { } // Check now if we have synced up at least once for this PVC - rsDataProtected, err := v.volSyncHandler.IsRSDataProtected(protectedPVC.Name) + rsDataProtected, err := v.volSyncHandler.IsRSDataProtected(protectedPVC.Name, protectedPVC.Namespace) if err != nil || !rsDataProtected { ready = false From edafd9fa411a3388002ae89188e96437df9d0d1b Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Mon, 13 May 2024 06:28:55 -0400 Subject: [PATCH 22/32] volsync: skip ownerRef on pvc,RD,RS when in multinamespace mode Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler.go | 54 ++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/controllers/volsync/vshandler.go b/controllers/volsync/vshandler.go index 632bf8771..f380e3cfc 100644 --- a/controllers/volsync/vshandler.go +++ b/controllers/volsync/vshandler.go @@ -191,10 +191,12 @@ func (v *VSHandler) createOrUpdateRD( } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, rd, func() error { - if err := ctrl.SetControllerReference(v.owner, rd, v.client.Scheme()); err != nil { - l.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, rd, v.client.Scheme()); err != nil { + l.Error(err, "unable to set controller reference") - return fmt.Errorf("%w", err) + return fmt.Errorf("%w", err) + } } util.AddLabel(rd, VRGOwnerLabel, v.owner.GetName()) @@ -437,10 +439,12 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, rs, func() error { - if err := ctrl.SetControllerReference(v.owner, rs, v.client.Scheme()); err != nil { - l.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, rs, v.client.Scheme()); err != nil { + l.Error(err, "unable to set controller reference") - return fmt.Errorf("%w", err) + return fmt.Errorf("%w", err) + } } util.AddLabel(rs, VRGOwnerLabel, v.owner.GetName()) @@ -893,7 +897,7 @@ func (v *VSHandler) EnsurePVCfromRD(rdSpec ramendrv1alpha1.VolSyncReplicationDes return v.validateSnapshotAndEnsurePVC(rdSpec, *vsImageRef, failoverAction) } -//nolint:cyclop +//nolint:cyclop,funlen,gocognit func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, ) error { @@ -924,8 +928,10 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, } op, err := ctrlutil.CreateOrUpdate(ctx, v.client, pvc, func() error { - if err := ctrl.SetControllerReference(v.owner, pvc, v.client.Scheme()); err != nil { - return fmt.Errorf("failed to set controller reference %w", err) + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, pvc, v.client.Scheme()); err != nil { + return fmt.Errorf("failed to set controller reference %w", err) + } } if pvc.CreationTimestamp.IsZero() { @@ -1183,7 +1189,6 @@ func (v *VSHandler) validateAndProtectSnapshot( AddOwner(v.owner, v.client.Scheme()). AddLabel(VolSyncDoNotDeleteLabel, VolSyncDoNotDeleteLabelVal). 
Update(v.ctx, v.client) - if err != nil { return nil, fmt.Errorf("failed to add owner/label to snapshot %s (%w)", volSnap.GetName(), err) } @@ -1195,12 +1200,16 @@ func (v *VSHandler) validateAndProtectSnapshot( func (v *VSHandler) addAnnotationAndVRGOwnerRefAndUpdate(obj client.Object, annotationName, annotationValue string, -) error { +) (err error) { + var ownerRefUpdated bool + annotationsUpdated := util.AddAnnotation(obj, annotationName, annotationValue) - ownerRefUpdated, err := util.AddOwnerReference(obj, v.owner, v.client.Scheme()) // VRG as owner - if err != nil { - return err + if !v.vrgInAdminNamespace { + ownerRefUpdated, err = util.AddOwnerReference(obj, v.owner, v.client.Scheme()) // VRG as owner + if err != nil { + return err + } } if annotationsUpdated || ownerRefUpdated { @@ -1650,6 +1659,7 @@ func (v *VSHandler) reconcileLocalReplication(rd *volsyncv1alpha1.ReplicationDes return lrd, lrs, nil } +//nolint:funlen func (v *VSHandler) reconcileLocalRD(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, pskSecretName string) (*volsyncv1alpha1.ReplicationDestination, error, ) { @@ -1668,10 +1678,12 @@ func (v *VSHandler) reconcileLocalRD(rdSpec ramendrv1alpha1.VolSyncReplicationDe } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, lrd, func() error { - if err := ctrl.SetControllerReference(v.owner, lrd, v.client.Scheme()); err != nil { - v.log.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, lrd, v.client.Scheme()); err != nil { + v.log.Error(err, "unable to set controller reference") - return err + return err + } } util.AddLabel(lrd, VRGOwnerLabel, v.owner.GetName()) @@ -1744,10 +1756,12 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, lrs, func() error { - if err := ctrl.SetControllerReference(v.owner, lrs, v.client.Scheme()); err != nil { - v.log.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, lrs, v.client.Scheme()); err != nil { + v.log.Error(err, "unable to set controller reference") - return err + return err + } } util.AddLabel(lrs, VRGOwnerLabel, v.owner.GetName()) From e481c2edb6274d9a511305e8b8f62bf06c44cd47 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Wed, 15 May 2024 16:52:07 -0400 Subject: [PATCH 23/32] volsync: create volsync secret in the pvc ns not vrg Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler.go | 70 +++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/controllers/volsync/vshandler.go b/controllers/volsync/vshandler.go index f380e3cfc..b7ca2ef5a 100644 --- a/controllers/volsync/vshandler.go +++ b/controllers/volsync/vshandler.go @@ -98,6 +98,8 @@ func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, ow // returns replication destination only if create/update is successful and the RD is considered available. // Callers should assume getting a nil replication destination back means they should retry/requeue. 
+//
+//nolint:cyclop
 func (v *VSHandler) ReconcileRD(
 	rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec) (*volsyncv1alpha1.ReplicationDestination, error,
 ) {
@@ -115,6 +117,14 @@ func (v *VSHandler) ReconcileRD(
 		return nil, err
 	}
 
+	if v.vrgInAdminNamespace {
+		// copy the secret to the namespace where the PVC is
+		err = v.copySecretToPVCNamespace(pskSecretName, util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC))
+		if err != nil {
+			return nil, err
+		}
+	}
+
 	// Check if a ReplicationSource is still here (Can happen if transitioning from primary to secondary)
 	// Before creating a new RD for this PVC, make sure any ReplicationSource for this PVC is cleaned up first
 	// This avoids a scenario where we create an RD that immediately syncs with an RS that still exists locally
@@ -260,7 +270,7 @@ func (v *VSHandler) isPVCInUseByNonRDPod(pvcNamespacedName types.NamespacedName)
 // Callers should assume getting a nil replication source back means they should retry/requeue.
 // Returns true/false if final sync is complete, and also returns an RS if one was reconciled.
 //
-//nolint:cyclop
+//nolint:cyclop,funlen
 func (v *VSHandler) ReconcileRS(rsSpec ramendrv1alpha1.VolSyncReplicationSourceSpec,
 	runFinalSync bool) (bool /* finalSyncComplete */, *volsyncv1alpha1.ReplicationSource, error,
 ) {
@@ -281,6 +291,14 @@ func (v *VSHandler) ReconcileRS(rsSpec ramendrv1alpha1.VolSyncReplicationSourceS
 		return false, nil, err
 	}
 
+	if v.vrgInAdminNamespace {
+		// copy the secret to the namespace where the PVC is
+		err = v.copySecretToPVCNamespace(pskSecretName, util.ProtectedPVCNamespacedName(rsSpec.ProtectedPVC))
+		if err != nil {
+			return false, nil, err
+		}
+	}
+
 	// Check if a ReplicationDestination is still here (Can happen if transitioning from secondary to primary)
 	// Before creating a new RS for this PVC, make sure any ReplicationDestination for this PVC is cleaned up first
 	// This avoids a scenario where we create an RS that immediately connects back to an RD that still exists locally
@@ -631,6 +649,56 @@ func (v *VSHandler) validateSecretAndAddVRGOwnerRef(secretName string) (bool, er
 	return true, nil
 }
 
+func (v *VSHandler) copySecretToPVCNamespace(secretName string, pvcNamespacedName types.NamespacedName) error {
+	secret := &corev1.Secret{}
+
+	err := v.client.Get(v.ctx,
+		types.NamespacedName{
+			Name:      secretName,
+			Namespace: pvcNamespacedName.Namespace,
+		}, secret)
+	if err != nil && !kerrors.IsNotFound(err) {
+		v.log.Error(err, "Failed to get secret", "secretName", secretName)
+
+		return fmt.Errorf("error getting secret (%w)", err)
+	}
+
+	if err == nil {
+		v.log.Info("Secret already exists in the PVC namespace", "secretName", secretName, "pvcNamespace",
+			pvcNamespacedName.Namespace)
+
+		return nil
+	}
+
+	v.log.Info("volsync secret not found in the pvc namespace, will create it", "secretName", secretName,
+		"pvcNamespace", pvcNamespacedName.Namespace)
+
+	err = v.client.Get(v.ctx,
+		types.NamespacedName{
+			Name:      secretName,
+			Namespace: v.owner.GetNamespace(),
+		}, secret)
+	if err != nil {
+		return fmt.Errorf("error getting secret from the admin namespace (%w)", err)
+	}
+
+	secretCopy := secret.DeepCopy()
+
+	secretCopy.ObjectMeta = metav1.ObjectMeta{
+		Name:        secretName,
+		Namespace:   pvcNamespacedName.Namespace,
+		Labels:      secret.Labels,
+		Annotations: secret.Annotations,
+	}
+
+	err = v.client.Create(v.ctx, secretCopy)
+	if err != nil {
+		return fmt.Errorf("error creating secret (%w)", err)
+	}
+
+	return nil
+}
+
 func (v *VSHandler) getRS(name, namespace string)
(*volsyncv1alpha1.ReplicationSource, error) { rs := &volsyncv1alpha1.ReplicationSource{} From 6057eb116455aa7a8a730422df046921774093da Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Sat, 18 May 2024 00:19:56 -0400 Subject: [PATCH 24/32] tests: fix tests for NewVSHandler Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler_test.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/controllers/volsync/vshandler_test.go b/controllers/volsync/vshandler_test.go index 6ffa0a105..b58cd9f3b 100644 --- a/controllers/volsync/vshandler_test.go +++ b/controllers/volsync/vshandler_test.go @@ -87,7 +87,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { var vsHandler *volsync.VSHandler BeforeEach(func() { - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot", false) }) It("GetVolumeSnapshotClasses() should find all volume snapshot classes", func() { @@ -116,7 +116,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }, } - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot", false) }) It("GetVolumeSnapshotClasses() should find matching volume snapshot classes", func() { @@ -159,7 +159,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }, } - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot", false) }) It("GetVolumeSnapshotClasses() should find matching volume snapshot classes", func() { @@ -216,7 +216,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { // Initialize a vshandler vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, - "openshift-storage.cephfs.csi.ceph.com", "Snapshot") + "openshift-storage.cephfs.csi.ceph.com", "Snapshot", false) }) JustBeforeEach(func() { @@ -431,7 +431,7 @@ var _ = Describe("VolSync_Handler", func() { Expect(ownerCm.GetName()).NotTo(BeEmpty()) owner = ownerCm - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Snapshot", false) }) AfterEach(func() { @@ -653,7 +653,7 @@ var _ = Describe("VolSync_Handler", func() { var vsHandler *volsync.VSHandler BeforeEach(func() { - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Direct") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Direct", false) }) It("PrecreateDestPVCIfEnabled() should return CopyMethod Snapshot and App PVC name", func() { @@ -1461,7 +1461,7 @@ var _ = Describe("VolSync_Handler", func() { Expect(k8sClient.Create(ctx, otherOwnerCm)).To(Succeed()) Expect(otherOwnerCm.GetName()).NotTo(BeEmpty()) otherVSHandler := volsync.NewVSHandler(ctx, k8sClient, logger, otherOwnerCm, asyncSpec, - "none", "Snapshot") + "none", "Snapshot", false) for i := 0; i < 2; i++ { otherOwnerRdSpec := ramendrv1alpha1.VolSyncReplicationDestinationSpec{ @@ -1651,7 +1651,7 @@ var _ = Describe("VolSync_Handler", func() { Expect(k8sClient.Create(ctx, otherOwnerCm)).To(Succeed()) Expect(otherOwnerCm.GetName()).NotTo(BeEmpty()) otherVSHandler := 
volsync.NewVSHandler(ctx, k8sClient, logger, otherOwnerCm, asyncSpec, - "none", "Snapshot") + "none", "Snapshot", false) for i := 0; i < 2; i++ { otherOwnerRsSpec := ramendrv1alpha1.VolSyncReplicationSourceSpec{ From 7d26db371f0311de2d50f138aaa426cf5ac67308 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Sat, 18 May 2024 01:17:21 -0400 Subject: [PATCH 25/32] tests: fix test for TakePVCOwnership Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler_test.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/controllers/volsync/vshandler_test.go b/controllers/volsync/vshandler_test.go index b58cd9f3b..c0ec9bfbd 100644 --- a/controllers/volsync/vshandler_test.go +++ b/controllers/volsync/vshandler_test.go @@ -1764,7 +1764,11 @@ var _ = Describe("VolSync_Handler", func() { Describe("Prepare PVC for final sync", func() { Context("When the PVC does not exist", func() { It("Should assume preparationForFinalSync is complete", func() { - pvcPreparationComplete, err := vsHandler.TakePVCOwnership("this-pvc-does-not-exist") + pvcNamespacedName := types.NamespacedName{ + Name: "this-pvc-does-not-exist", + Namespace: testNamespace.GetName(), + } + pvcPreparationComplete, err := vsHandler.TakePVCOwnership(pvcNamespacedName) Expect(err).To(HaveOccurred()) Expect(kerrors.IsNotFound(err)).To(BeTrue()) Expect(pvcPreparationComplete).To(BeFalse()) @@ -1791,7 +1795,12 @@ var _ = Describe("VolSync_Handler", func() { var pvcPreparationErr error JustBeforeEach(func() { - pvcPreparationComplete, pvcPreparationErr = vsHandler.TakePVCOwnership(testPVC.GetName()) + pvcNamespacedName := types.NamespacedName{ + Name: testPVC.GetName(), + Namespace: testPVC.GetNamespace(), + } + + pvcPreparationComplete, pvcPreparationErr = vsHandler.TakePVCOwnership(pvcNamespacedName) // In all cases at this point we should expect that the PVC has ownership taken over by our owner VRG Eventually(func() bool { From a5e356f4f4acca682ba1a9dab5fc5d2fa4f4f072 Mon Sep 17 00:00:00 2001 From: Raghavendra Talur Date: Sat, 18 May 2024 03:10:14 -0400 Subject: [PATCH 26/32] tests: make sure protected pvc in test have ns Signed-off-by: Raghavendra Talur --- controllers/volsync/vshandler_test.go | 10 ++++++++++ controllers/vrg_volsync_test.go | 2 ++ 2 files changed, 12 insertions(+) diff --git a/controllers/volsync/vshandler_test.go b/controllers/volsync/vshandler_test.go index c0ec9bfbd..001bc6f98 100644 --- a/controllers/volsync/vshandler_test.go +++ b/controllers/volsync/vshandler_test.go @@ -465,6 +465,7 @@ var _ = Describe("VolSync_Handler", func() { JustBeforeEach(func() { // Run ReconcileRD var err error + rdSpec.ProtectedPVC.Namespace = testNamespace.GetName() returnedRD, err = vsHandler.ReconcileRD(rdSpec) Expect(err).ToNot(HaveOccurred()) }) @@ -483,6 +484,7 @@ var _ = Describe("VolSync_Handler", func() { Context("When the psk secret for volsync exists (will be pushed down by drpc from hub", func() { var dummyPSKSecret *corev1.Secret JustBeforeEach(func() { + rdSpec.ProtectedPVC.Namespace = testNamespace.GetName() // Create a dummy volsync psk secret so the reconcile can proceed properly dummyPSKSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -653,6 +655,7 @@ var _ = Describe("VolSync_Handler", func() { var vsHandler *volsync.VSHandler BeforeEach(func() { + rdSpec.ProtectedPVC.Namespace = testNamespace.GetName() vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Direct", false) }) @@ -703,6 +706,7 @@ var _ = Describe("VolSync_Handler", func() { // 
Run ReconcileRD
 			var err error
 			var finalSyncCompl bool
+			rsSpec.ProtectedPVC.Namespace = testNamespace.GetName()
 			finalSyncCompl, returnedRS, err = vsHandler.ReconcileRS(rsSpec, false)
 			Expect(err).ToNot(HaveOccurred())
 			Expect(finalSyncCompl).To(BeFalse())
@@ -722,6 +726,7 @@
 		Context("When the psk secret for volsync exists (will be pushed down by drpc from hub", func() {
 			var dummyPSKSecret *corev1.Secret
 			JustBeforeEach(func() {
+				rsSpec.ProtectedPVC.Namespace = testNamespace.GetName()
 				// Create a dummy volsync psk secret so the reconcile can proceed properly
 				dummyPSKSecret = &corev1.Secret{
 					ObjectMeta: metav1.ObjectMeta{
@@ -1102,6 +1107,7 @@
 			rdSpec = ramendrv1alpha1.VolSyncReplicationDestinationSpec{
 				ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 					Name:               pvcName,
+					Namespace:          testNamespace.GetName(),
 					ProtectedByVolSync: true,
 					StorageClassName:   &testStorageClassName,
 					Resources: corev1.VolumeResourceRequirements{
@@ -1438,6 +1444,7 @@
 				rdSpec := ramendrv1alpha1.VolSyncReplicationDestinationSpec{
 					ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 						Name:               pvcNamePrefix + strconv.Itoa(i),
+						Namespace:          testNamespace.GetName(),
 						ProtectedByVolSync: true,
 						StorageClassName:   &testStorageClassName,
 						Resources: corev1.VolumeResourceRequirements{
@@ -1467,6 +1474,7 @@
 					otherOwnerRdSpec := ramendrv1alpha1.VolSyncReplicationDestinationSpec{
 						ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 							Name:               pvcNamePrefixOtherOwner + strconv.Itoa(i),
+							Namespace:          testNamespace.GetName(),
 							ProtectedByVolSync: true,
 							StorageClassName:   &testStorageClassName,
 							Resources: corev1.VolumeResourceRequirements{
@@ -1632,6 +1640,7 @@
 				rsSpec := ramendrv1alpha1.VolSyncReplicationSourceSpec{
 					ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 						Name:               pvcNamePrefix + strconv.Itoa(i),
+						Namespace:          testNamespace.GetName(),
 						ProtectedByVolSync: true,
 						StorageClassName:   &testStorageClassName,
 					},
@@ -1657,6 +1666,7 @@
 					otherOwnerRsSpec := ramendrv1alpha1.VolSyncReplicationSourceSpec{
 						ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 							Name:               pvcNamePrefixOtherOwner + strconv.Itoa(i),
+							Namespace:          testNamespace.GetName(),
 							ProtectedByVolSync: true,
 							StorageClassName:   &testStorageClassName,
 						},
diff --git a/controllers/vrg_volsync_test.go b/controllers/vrg_volsync_test.go
index c358e4ef6..b9cc20576 100644
--- a/controllers/vrg_volsync_test.go
+++ b/controllers/vrg_volsync_test.go
@@ -281,6 +281,7 @@ var _ = Describe("VolumeReplicationGroupVolSyncController", func() {
 				{
 					ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 						Name:               "testingpvc-a",
+						Namespace:          testNamespace.GetName(),
 						ProtectedByVolSync: true,
 						StorageClassName:   &storageClassName,
 						AccessModes:        testAccessModes,
@@ -291,6 +292,7 @@
 				{
 					ProtectedPVC: ramendrv1alpha1.ProtectedPVC{
 						Name:               "testingpvc-b",
+						Namespace:          testNamespace.GetName(),
 						ProtectedByVolSync: true,
 						StorageClassName:   &storageClassName,
 						AccessModes:        testAccessModes,

From d93ac1f556b195d88ed93581f2cfa6d70ebb1e40 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Wed, 22 May 2024 15:05:58 +0300
Subject: [PATCH 27/32] Fail e2e workflow if tests failed

Now that we have basic tests running, we want to fail the workflow if
the tests fail. Without this, people assume code changes passed the
tests.
Signed-off-by: Nir Soffer --- e2e/e2e-rdr.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/e2e/e2e-rdr.sh b/e2e/e2e-rdr.sh index 7d2320204..862da3528 100755 --- a/e2e/e2e-rdr.sh +++ b/e2e/e2e-rdr.sh @@ -6,5 +6,3 @@ echo "Running tests..." go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -timeout 0 -v "$@" - -exit 0 From 691062c763c25f3714f514439f1fb8401643b630 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Wed, 22 May 2024 14:15:07 +0300 Subject: [PATCH 28/32] Don't cleanup nil request kubeObjectsRecoveryStartOrResume() error handling is very confusing - the code tries to avoid duplicating error handling in 2 unrelated code paths (ok=true, ok=false), leading to referencing a nil request when ok is false. We need to refactor this later, for now just skip cleanup if there is nothing to cleanup. Bug: https://bugzilla.redhat.com/2282284 Signed-off-by: Nir Soffer --- controllers/vrg_kubeobjects.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/controllers/vrg_kubeobjects.go b/controllers/vrg_kubeobjects.go index ef748a3af..2241fb8c5 100644 --- a/controllers/vrg_kubeobjects.go +++ b/controllers/vrg_kubeobjects.go @@ -609,7 +609,10 @@ func (v *VRGInstance) kubeObjectsRecoveryStartOrResume( } log1.Error(err, "Kube objects group recover error") - cleanup(request) + + if ok { + cleanup(request) + } result.Requeue = true From 4c943bd8293c198154cad210bd102266c7fc25d1 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Tue, 28 May 2024 17:24:46 +0300 Subject: [PATCH 29/32] Upgrade kubevirt to 1.2.1 Consume the latest release instead of the temporary build from my private drenv-addons repo. Update the cache file name to force a cache refresh. Signed-off-by: Nir Soffer --- docs/user-quick-start.md | 2 +- test/README.md | 2 +- test/addons/kubevirt/cr/kustomization.yaml | 2 +- test/addons/kubevirt/fetch | 4 ++-- test/addons/kubevirt/operator/kustomization.yaml | 3 +-- test/addons/kubevirt/start | 4 ++-- 6 files changed, 8 insertions(+), 9 deletions(-) diff --git a/docs/user-quick-start.md b/docs/user-quick-start.md index fedde1f11..79a73797c 100644 --- a/docs/user-quick-start.md +++ b/docs/user-quick-start.md @@ -152,7 +152,7 @@ enough resources: 1. Install the `virtctl` tool. ``` - curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 + curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/virtctl-v1.2.1-linux-amd64 sudo install virtctl /usr/local/bin rm virtctl ``` diff --git a/test/README.md b/test/README.md index 8965a6a49..f6a2ab06d 100644 --- a/test/README.md +++ b/test/README.md @@ -60,7 +60,7 @@ environment. 1. 
Install the `virtctl` tool ``` - curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 + curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/virtctl-v1.2.1-linux-amd64 sudo install virtctl /usr/local/bin rm virtctl ``` diff --git a/test/addons/kubevirt/cr/kustomization.yaml b/test/addons/kubevirt/cr/kustomization.yaml index c1817be8f..ac193e0f3 100644 --- a/test/addons/kubevirt/cr/kustomization.yaml +++ b/test/addons/kubevirt/cr/kustomization.yaml @@ -4,7 +4,7 @@ # yamllint disable rule:line-length --- resources: - - https://raw.githubusercontent.com/nirs/drenv-addons/main/kubevirt/9c3a52d67c6782cd48ae0702df5493f5023a04cc/kubevirt-cr.yaml + - https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/kubevirt-cr.yaml patches: # Incrase certificate duration to avoid certificates renewals while a cluster # is suspended and resumed. diff --git a/test/addons/kubevirt/fetch b/test/addons/kubevirt/fetch index a7fd5cba5..4fe101c4f 100755 --- a/test/addons/kubevirt/fetch +++ b/test/addons/kubevirt/fetch @@ -9,8 +9,8 @@ from drenv import cache os.chdir(os.path.dirname(__file__)) -path = cache.path("addons/kubevirt-operator.yaml") +path = cache.path("addons/kubevirt-operator-1.2.1.yaml") cache.fetch("operator", path) -path = cache.path("addons/kubevirt-cr.yaml") +path = cache.path("addons/kubevirt-cr-1.2.1.yaml") cache.fetch("cr", path) diff --git a/test/addons/kubevirt/operator/kustomization.yaml b/test/addons/kubevirt/operator/kustomization.yaml index 43483f12a..ab36b7f44 100644 --- a/test/addons/kubevirt/operator/kustomization.yaml +++ b/test/addons/kubevirt/operator/kustomization.yaml @@ -4,5 +4,4 @@ # yamllint disable rule:line-length --- resources: - # TODO: use latest release. - - https://raw.githubusercontent.com/nirs/drenv-addons/main/kubevirt/9c3a52d67c6782cd48ae0702df5493f5023a04cc/kubevirt-operator.yaml + - https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/kubevirt-operator.yaml diff --git a/test/addons/kubevirt/start b/test/addons/kubevirt/start index 981df4585..5346ef1d0 100755 --- a/test/addons/kubevirt/start +++ b/test/addons/kubevirt/start @@ -14,7 +14,7 @@ NAMESPACE = "kubevirt" def deploy(cluster): print("Deploying kubevirt operator") - path = cache.path("addons/kubevirt-operator.yaml") + path = cache.path("addons/kubevirt-operator-1.2.1.yaml") cache.fetch("operator", path) kubectl.apply("--filename", path, context=cluster) @@ -28,7 +28,7 @@ def deploy(cluster): ) print("Deploying kubevirt cr") - path = cache.path("addons/kubevirt-cr.yaml") + path = cache.path("addons/kubevirt-cr-1.2.1.yaml") cache.fetch("cr", path) kubectl.apply("--filename", path, context=cluster) From 78f3c3f235216dc1fcbeafc61f9c883ec787baaf Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Tue, 28 May 2024 17:31:57 +0300 Subject: [PATCH 30/32] Upgrade CDI to 1.59.0 We want to test with the latest release to find issues early. Updating the cache name to force a refresh. 
Signed-off-by: Nir Soffer --- test/addons/cdi/cr/kustomization.yaml | 2 +- test/addons/cdi/fetch | 4 ++-- test/addons/cdi/operator/kustomization.yaml | 2 +- test/addons/cdi/start | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/addons/cdi/cr/kustomization.yaml b/test/addons/cdi/cr/kustomization.yaml index de94a7bac..7353c111f 100644 --- a/test/addons/cdi/cr/kustomization.yaml +++ b/test/addons/cdi/cr/kustomization.yaml @@ -4,7 +4,7 @@ # yamllint disable rule:line-length --- resources: - - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.58.3/cdi-cr.yaml + - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.59.0/cdi-cr.yaml patches: # Allow pulling from local insecure registry. - target: diff --git a/test/addons/cdi/fetch b/test/addons/cdi/fetch index 59036749b..cc4ecae15 100755 --- a/test/addons/cdi/fetch +++ b/test/addons/cdi/fetch @@ -9,8 +9,8 @@ from drenv import cache os.chdir(os.path.dirname(__file__)) -path = cache.path("addons/cdi-operator.yaml") +path = cache.path("addons/cdi-operator-1.59.0.yaml") cache.fetch("operator", path) -path = cache.path("addons/cdi-cr.yaml") +path = cache.path("addons/cdi-cr-1.59.0.yaml") cache.fetch("cr", path) diff --git a/test/addons/cdi/operator/kustomization.yaml b/test/addons/cdi/operator/kustomization.yaml index 2bc536aff..2ba25dc0b 100644 --- a/test/addons/cdi/operator/kustomization.yaml +++ b/test/addons/cdi/operator/kustomization.yaml @@ -4,4 +4,4 @@ # yamllint disable rule:line-length --- resources: - - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.58.3/cdi-operator.yaml + - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.59.0/cdi-operator.yaml diff --git a/test/addons/cdi/start b/test/addons/cdi/start index 9407936bd..6c4f63443 100755 --- a/test/addons/cdi/start +++ b/test/addons/cdi/start @@ -14,7 +14,7 @@ NAMESPACE = "cdi" def deploy(cluster): print("Deploying cdi operator") - path = cache.path("addons/cdi-operator.yaml") + path = cache.path("addons/cdi-operator-1.59.0.yaml") cache.fetch("operator", path) kubectl.apply("--filename", path, context=cluster) @@ -28,7 +28,7 @@ def deploy(cluster): ) print("Deploying cdi cr") - path = cache.path("addons/cdi-cr.yaml") + path = cache.path("addons/cdi-cr-1.59.0.yaml") cache.fetch("cr", path) kubectl.apply("--filename", path, context=cluster) From d35ecf047d14a35a2ad9ff1dd18261453935544b Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Wed, 15 May 2024 21:57:23 +0300 Subject: [PATCH 31/32] Use single replica in virt-operator We don't need HA in a testing environment. Use single replica for the virt-operator deployment. Signed-off-by: Nir Soffer --- test/addons/kubevirt/operator/kustomization.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/addons/kubevirt/operator/kustomization.yaml b/test/addons/kubevirt/operator/kustomization.yaml index ab36b7f44..ba14faf3e 100644 --- a/test/addons/kubevirt/operator/kustomization.yaml +++ b/test/addons/kubevirt/operator/kustomization.yaml @@ -5,3 +5,12 @@ --- resources: - https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/kubevirt-operator.yaml +patches: + # Use single replica to minimize resource usage. 
+ - target: + kind: Deployment + name: virt-operator + patch: |- + - op: replace + path: /spec/replicas + value: 1 From 999e108e4b94b6b89f8eb12f41d0fbb4fbfe95d6 Mon Sep 17 00:00:00 2001 From: Nir Soffer Date: Wed, 15 May 2024 22:05:57 +0300 Subject: [PATCH 32/32] Use single replica for virt-controller To minimize resource usage in our testing environment. Signed-off-by: Nir Soffer --- test/addons/kubevirt/cr/kustomization.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/addons/kubevirt/cr/kustomization.yaml b/test/addons/kubevirt/cr/kustomization.yaml index ac193e0f3..c7af8c96e 100644 --- a/test/addons/kubevirt/cr/kustomization.yaml +++ b/test/addons/kubevirt/cr/kustomization.yaml @@ -23,3 +23,15 @@ patches: duration: 168h server: duration: 168h + # Use single replica to minimize resource usage. + - target: + kind: KubeVirt + name: kubevirt + patch: |- + apiVersion: kubevirt.io/v1 + kind: Kubevirt + metadata: + name: not-used + spec: + infra: + replicas: 1
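Note: several patches in this series lean on a util.ProtectedPVCNamespacedName helper whose
definition is not part of these diffs. A minimal sketch of what such a helper presumably looks
like, inferred from its call sites (getPVC, isPVCInUseByNonRDPod, and copySecretToPVCNamespace
all accept a types.NamespacedName); the actual implementation in controllers/util may differ:

package util

import (
	"k8s.io/apimachinery/pkg/types"

	ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1"
)

// ProtectedPVCNamespacedName returns the namespace/name key of a protected
// PVC, so lookups no longer assume the PVC lives in the VRG owner's
// namespace. Sketch only: inferred from call sites in this series, not the
// actual definition.
func ProtectedPVCNamespacedName(pvc ramendrv1alpha1.ProtectedPVC) types.NamespacedName {
	return types.NamespacedName{
		Namespace: pvc.Namespace,
		Name:      pvc.Name,
	}
}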