From 3da821e7a53017b140f59b92e8a1247780004f3d Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Wed, 30 Oct 2024 19:20:14 +0200
Subject: [PATCH] Use per-test logger for clearer logs

We used to log everything using the global logger. This makes the log
messy and hard to follow, since we run 5 dr flows concurrently. This
will get worse as we add new workloads (e.g. flattened pvcs, cloned
pvcs, volsync with block pvc).

We now use a per-test logger, created using the name of the workload
(e.g. disapp-deploy-rbd-busybox). Every log message from a test flow
starts with this name. The rest of the log message does not specify
resource names, since we use the same name for all resources (drpc,
placement, subscription, etc.).

Clean up log messages, replacing noisy text with a simple description
of the event. More work is needed: we still log too many uninteresting
details, and we do not log what we are waiting for, but this is already
much easier to follow.

Example log with this change:

=== RUN   TestSuites/Exhaustive/Deploy-rbd/Subscr/Enable
2024-10-31T00:37:41.913+0200	INFO	subscr-deploy-rbd-busybox	Protecting workload
2024-10-31T00:37:41.913+0200	INFO	subscr-deploy-cephfs-busybox	Protecting workload
2024-10-31T00:37:41.915+0200	INFO	appset-deploy-rbd-busybox	Workload running on dr2
2024-10-31T00:37:41.915+0200	INFO	appset-deploy-rbd-busybox	Annotating placement
2024-10-31T00:37:41.917+0200	INFO	subscr-deploy-rbd-busybox	Workload running on dr2
2024-10-31T00:37:41.917+0200	INFO	subscr-deploy-rbd-busybox	Annotating placement
2024-10-31T00:37:41.918+0200	INFO	subscr-deploy-cephfs-busybox	Workload running on dr1
2024-10-31T00:37:41.918+0200	INFO	subscr-deploy-cephfs-busybox	Annotating placement
2024-10-31T00:37:41.926+0200	INFO	appset-deploy-rbd-busybox	Creating drpc
2024-10-31T00:37:41.930+0200	INFO	subscr-deploy-rbd-busybox	Creating drpc
2024-10-31T00:37:41.932+0200	INFO	subscr-deploy-cephfs-busybox	Creating drpc
2024-10-31T00:38:52.038+0200	INFO	appset-deploy-cephfs-busybox	drpc is ready

Fixes: #1597
Signed-off-by: Nir Soffer
---
 e2e/deployers/applicationset.go        | 22 ++++++-----
 e2e/deployers/crud.go                  | 47 ++++++++++++------------
 e2e/deployers/discoveredapps.go        | 30 ++++++++-------
 e2e/deployers/retry.go                 | 13 ++++---
 e2e/deployers/subscription.go          | 20 +++++-----
 e2e/dractions/actions.go               | 49 ++++++++++++++-----------
 e2e/dractions/actionsdiscoveredapps.go | 51 ++++++++++++++------------
 e2e/dractions/crud.go                  |  5 ++-
 e2e/dractions/retry.go                 | 31 +++++++++-------
 e2e/util/crud.go                       | 19 +++++-----
 e2e/workloads/deployment.go            |  6 +--
 e2e/workloads/workload.go              |  7 +++-
 12 files changed, 164 insertions(+), 136 deletions(-)

diff --git a/e2e/deployers/applicationset.go b/e2e/deployers/applicationset.go
index 74a1a9fc4..f3cc2e3bd 100644
--- a/e2e/deployers/applicationset.go
+++ b/e2e/deployers/applicationset.go
@@ -13,25 +13,26 @@ type ApplicationSet struct{}
 func (a ApplicationSet) Deploy(w workloads.Workload) error {
 	name := GetCombinedName(a, w)
 	namespace := util.ArgocdNamespace
+	log := util.Ctx.Log.WithName(name)

-	util.Ctx.Log.Info("enter Deploy " + name)
+	log.Info("Deploying workload")

 	err := CreateManagedClusterSetBinding(McsbName, namespace)
 	if err != nil {
 		return err
 	}

-	err = CreatePlacement(name, namespace)
+	err = CreatePlacement(name, namespace, log)
 	if err != nil {
 		return err
 	}

-	err = CreatePlacementDecisionConfigMap(name, namespace)
+	err = CreatePlacementDecisionConfigMap(name, namespace, log)
 	if err != nil {
 		return err
 	}

-	err = CreateApplicationSet(a, w)
+	err = CreateApplicationSet(a, w, log)
 	if err != nil {
 		return err
 	}

@@ -42,33 +43,34 @@ func (a ApplicationSet) Deploy(w workloads.Workload) error {
 func (a ApplicationSet) Undeploy(w workloads.Workload) error {
 	name := GetCombinedName(a, w)
 	namespace := util.ArgocdNamespace
+	log := util.Ctx.Log.WithName(name)

-	util.Ctx.Log.Info("enter Undeploy " + name)
+	log.Info("Undeploying workload")

-	err := DeleteApplicationSet(a, w)
+	err := DeleteApplicationSet(a, w, log)
 	if err != nil {
 		return err
 	}

-	err = DeleteConfigMap(name, namespace)
+	err = DeleteConfigMap(name, namespace, log)
 	if err != nil {
 		return err
 	}

-	err = DeletePlacement(name, namespace)
+	err = DeletePlacement(name, namespace, log)
 	if err != nil {
 		return err
 	}

 	// multiple appsets could use the same mcsb in argocd ns.
 	// so delete mcsb if only 1 appset is in argocd ns
-	lastAppset, err := isLastAppsetInArgocdNs(namespace)
+	lastAppset, err := isLastAppsetInArgocdNs(namespace, log)
 	if err != nil {
 		return err
 	}

 	if lastAppset {
-		err = DeleteManagedClusterSetBinding(McsbName, namespace)
+		err = DeleteManagedClusterSetBinding(McsbName, namespace, log)
 		if err != nil {
 			return err
 		}
diff --git a/e2e/deployers/crud.go b/e2e/deployers/crud.go
index 9857f7f97..3dca636c6 100644
--- a/e2e/deployers/crud.go
+++ b/e2e/deployers/crud.go
@@ -11,6 +11,7 @@ import (
 	"os/exec"
 	"strings"

+	"github.com/go-logr/logr"
 	"github.com/ramendr/ramen/e2e/util"
 	"github.com/ramendr/ramen/e2e/workloads"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -58,7 +59,7 @@ func CreateManagedClusterSetBinding(name, namespace string) error {
 	return nil
 }

-func DeleteManagedClusterSetBinding(name, namespace string) error {
+func DeleteManagedClusterSetBinding(name, namespace string, log logr.Logger) error {
 	mcsb := &ocmv1b2.ManagedClusterSetBinding{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -72,13 +73,13 @@ func DeleteManagedClusterSetBinding(name, namespace string) error {
 			return err
 		}

-		util.Ctx.Log.Info("managedClusterSetBinding " + name + " not found")
+		log.Info("ManagedClusterSetBinding " + name + " not found")
 	}

 	return nil
 }

-func CreatePlacement(name, namespace string) error {
+func CreatePlacement(name, namespace string, log logr.Logger) error {
 	labels := make(map[string]string)
 	labels[AppLabelKey] = name
 	clusterSet := []string{ClusterSetName}
@@ -102,13 +103,13 @@ func CreatePlacement(name, namespace string) error {
 			return err
 		}

-		util.Ctx.Log.Info("placement " + placement.Name + " already Exists")
+		log.Info("Placement already Exists")
 	}

 	return nil
 }

-func DeletePlacement(name, namespace string) error {
+func DeletePlacement(name, namespace string, log logr.Logger) error {
 	placement := &ocmv1b1.Placement{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -122,13 +123,13 @@ func DeletePlacement(name, namespace string) error {
 			return err
 		}

-		util.Ctx.Log.Info("placement " + name + " not found")
+		log.Info("Placement not found")
 	}

 	return nil
 }

-func CreateSubscription(s Subscription, w workloads.Workload) error {
+func CreateSubscription(s Subscription, w workloads.Workload, log logr.Logger) error {
 	name := GetCombinedName(s, w)
 	namespace := name

@@ -173,18 +174,18 @@ func CreateSubscription(s Subscription, w workloads.Workload) error {
 	err := util.Ctx.Hub.CtrlClient.Create(context.Background(), subscription)
 	if err != nil {
 		if !errors.IsAlreadyExists(err) {
-			util.Ctx.Log.Info(fmt.Sprintf("create subscription with error: %v", err))
+			log.Info(fmt.Sprintf("create subscription with error: %v", err))

 			return err
 		}

-		util.Ctx.Log.Info("subscription " + subscription.Name + " already Exists")
+		log.Info("Subscription already Exists")
 	}

 	return nil
 }

-func DeleteSubscription(s Subscription, w workloads.Workload) error {
+func DeleteSubscription(s Subscription, w workloads.Workload, log logr.Logger) error {
 	name := GetCombinedName(s, w)
 	namespace := name

@@ -201,7 +202,7 @@ func DeleteSubscription(s Subscription, w workloads.Workload) error {
 			return err
 		}

-		util.Ctx.Log.Info("subscription " + name + " not found")
+		log.Info("Subscription not found")
 	}

 	return nil
@@ -237,7 +238,7 @@ func getSubscription(client client.Client, namespace, name string) (*subscriptio
 	return subscription, nil
 }

-func CreatePlacementDecisionConfigMap(cmName string, cmNamespace string) error {
+func CreatePlacementDecisionConfigMap(cmName string, cmNamespace string, log logr.Logger) error {
 	object := metav1.ObjectMeta{Name: cmName, Namespace: cmNamespace}

 	data := map[string]string{
@@ -255,13 +256,13 @@ func CreatePlacementDecisionConfigMap(cmName string, cmNamespace string) error {
 			return fmt.Errorf("could not create configMap " + cmName)
 		}

-		util.Ctx.Log.Info("configMap " + cmName + " already Exists")
+		log.Info("ConfigMap " + cmName + " already Exists")
 	}

 	return nil
 }

-func DeleteConfigMap(cmName string, cmNamespace string) error {
+func DeleteConfigMap(cmName string, cmNamespace string, log logr.Logger) error {
 	object := metav1.ObjectMeta{Name: cmName, Namespace: cmNamespace}

 	configMap := &corev1.ConfigMap{
@@ -274,14 +275,14 @@ func DeleteConfigMap(cmName string, cmNamespace string) error {
 			return fmt.Errorf("could not delete configMap " + cmName)
 		}

-		util.Ctx.Log.Info("configMap " + cmName + " not found")
+		log.Info("ConfigMap " + cmName + " not found")
 	}

 	return nil
 }

 // nolint:funlen
-func CreateApplicationSet(a ApplicationSet, w workloads.Workload) error {
+func CreateApplicationSet(a ApplicationSet, w workloads.Workload, log logr.Logger) error {
 	var requeueSeconds int64 = 180

 	name := GetCombinedName(a, w)
@@ -353,13 +354,13 @@ func CreateApplicationSet(a ApplicationSet, w workloads.Workload) error {
 			return err
 		}

-		util.Ctx.Log.Info("applicationset " + appset.Name + " already Exists")
+		log.Info("Applicationset already Exists")
 	}

 	return nil
 }

-func DeleteApplicationSet(a ApplicationSet, w workloads.Workload) error {
+func DeleteApplicationSet(a ApplicationSet, w workloads.Workload, log logr.Logger) error {
 	name := GetCombinedName(a, w)
 	namespace := util.ArgocdNamespace

@@ -376,20 +377,20 @@ func DeleteApplicationSet(a ApplicationSet, w workloads.Workload) error {
 			return err
 		}

-		util.Ctx.Log.Info("applicationset " + appset.Name + " not found")
+		log.Info("Applicationset not found")
 	}

 	return nil
 }

 // check if only the last appset is in the argocd namespace
-func isLastAppsetInArgocdNs(namespace string) (bool, error) {
+func isLastAppsetInArgocdNs(namespace string, log logr.Logger) (bool, error) {
 	appsetList := &argocdv1alpha1hack.ApplicationSetList{}

 	err := util.Ctx.Hub.CtrlClient.List(
 		context.Background(), appsetList, client.InNamespace(namespace))
 	if err != nil {
-		util.Ctx.Log.Info("error in getting application sets")
+		log.Info("Failed to get application sets")

 		return false, err
 	}
@@ -397,7 +398,7 @@ func isLastAppsetInArgocdNs(namespace string) (bool, error) {
 	return len(appsetList.Items) == 1, nil
 }

-func DeleteDiscoveredApps(w workloads.Workload, namespace, cluster string) error {
+func DeleteDiscoveredApps(w workloads.Workload, namespace, cluster string, log logr.Logger) error {
 	tempDir, err := os.MkdirTemp("", "ramen-")
 	if err != nil {
 		return err
 	}
@@ -415,7 +416,7 @@ func DeleteDiscoveredApps(w workloads.Workload, namespace, cluster string) error
 	// Run the command and capture the output
 	if out, err := cmd.Output(); err != nil {
-		util.Ctx.Log.Info(string(out))
+		log.Info(string(out))

 		return err
 	}
diff --git a/e2e/deployers/discoveredapps.go b/e2e/deployers/discoveredapps.go
index 96b668de5..b7a4d8494 100644
--- a/e2e/deployers/discoveredapps.go
+++ b/e2e/deployers/discoveredapps.go
@@ -20,8 +20,9 @@ func (d DiscoveredApps) GetName() string {
 func (d DiscoveredApps) Deploy(w workloads.Workload) error {
 	name := GetCombinedName(d, w)
 	namespace := name
+	log := util.Ctx.Log.WithName(name)

-	util.Ctx.Log.Info("enter Deploy " + name)
+	log.Info("Deploying workload")

 	// create namespace in both dr clusters
 	if err := util.CreateNamespaceAndAddAnnotation(namespace); err != nil {
@@ -50,16 +51,16 @@ func (d DiscoveredApps) Deploy(w workloads.Workload) error {

 	// Run the command and capture the output
 	if out, err := cmd.Output(); err != nil {
-		util.Ctx.Log.Info(string(out))
+		log.Info(string(out))

 		return err
 	}

-	if err = WaitWorkloadHealth(util.Ctx.C1.CtrlClient, namespace, w); err != nil {
+	if err = WaitWorkloadHealth(util.Ctx.C1.CtrlClient, namespace, w, log); err != nil {
 		return err
 	}

-	util.Ctx.Log.Info(name + " is deployed")
+	log.Info("Workload deployed")

 	return nil
 }
@@ -67,41 +68,42 @@ func (d DiscoveredApps) Deploy(w workloads.Workload) error {
 func (d DiscoveredApps) Undeploy(w workloads.Workload) error {
 	name := GetCombinedName(d, w)
 	namespace := name // this namespace is in dr clusters
+	log := util.Ctx.Log.WithName(name)

-	util.Ctx.Log.Info("enter Undeploy " + name)
+	log.Info("Undeploying workload")

 	drpolicy, err := util.GetDRPolicy(util.Ctx.Hub.CtrlClient, util.DefaultDRPolicyName)
 	if err != nil {
 		return err
 	}

-	util.Ctx.Log.Info("starts to delete discovered apps on " + drpolicy.Spec.DRClusters[0])
+	log.Info("Deleting discovered apps on " + drpolicy.Spec.DRClusters[0])

 	// delete app on both clusters
-	if err := DeleteDiscoveredApps(w, namespace, drpolicy.Spec.DRClusters[0]); err != nil {
+	if err := DeleteDiscoveredApps(w, namespace, drpolicy.Spec.DRClusters[0], log); err != nil {
 		return err
 	}

-	util.Ctx.Log.Info("starts to delete discovered apps on " + drpolicy.Spec.DRClusters[1])
+	log.Info("Deleting discovered apps on " + drpolicy.Spec.DRClusters[1])

-	if err := DeleteDiscoveredApps(w, namespace, drpolicy.Spec.DRClusters[1]); err != nil {
+	if err := DeleteDiscoveredApps(w, namespace, drpolicy.Spec.DRClusters[1], log); err != nil {
 		return err
 	}

-	util.Ctx.Log.Info("starts to delete namespace " + namespace + " on " + drpolicy.Spec.DRClusters[0])
+	log.Info("Deleting namespace " + namespace + " on " + drpolicy.Spec.DRClusters[0])

 	// delete namespace on both clusters
-	if err := util.DeleteNamespace(util.Ctx.C1.CtrlClient, namespace); err != nil {
+	if err := util.DeleteNamespace(util.Ctx.C1.CtrlClient, namespace, log); err != nil {
 		return err
 	}

-	util.Ctx.Log.Info("starts to delete namespace " + namespace + " on " + drpolicy.Spec.DRClusters[1])
+	log.Info("Deleting namespace " + namespace + " on " + drpolicy.Spec.DRClusters[1])

-	if err := util.DeleteNamespace(util.Ctx.C2.CtrlClient, namespace); err != nil {
+	if err := util.DeleteNamespace(util.Ctx.C2.CtrlClient, namespace, log); err != nil {
 		return err
 	}

-	util.Ctx.Log.Info(name + " is undeployed")
+	log.Info("Workload undeployed")

 	return nil
 }
diff --git a/e2e/deployers/retry.go b/e2e/deployers/retry.go
index 34ab78025..a5911afc2 100644
--- a/e2e/deployers/retry.go
+++ b/e2e/deployers/retry.go
@@ -7,13 +7,14 @@ import (
 	"fmt"
 	"time"

+	"github.com/go-logr/logr"
"github.com/go-logr/logr" "github.com/ramendr/ramen/e2e/util" "github.com/ramendr/ramen/e2e/workloads" subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) -func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.SubscriptionPhase) error { +func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.SubscriptionPhase, log logr.Logger) error { startTime := time.Now() for { @@ -24,7 +25,7 @@ func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.Subscrip currentPhase := sub.Status.Phase if currentPhase == phase { - util.Ctx.Log.Info(fmt.Sprintf("subscription %s phase is %s", name, phase)) + log.Info(fmt.Sprintf("Subscription phase is %s", phase)) return nil } @@ -37,19 +38,19 @@ func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.Subscrip } } -func WaitWorkloadHealth(client client.Client, namespace string, w workloads.Workload) error { +func WaitWorkloadHealth(client client.Client, namespace string, w workloads.Workload, log logr.Logger) error { startTime := time.Now() for { - err := w.Health(client, namespace) + err := w.Health(client, namespace, log) if err == nil { - util.Ctx.Log.Info(fmt.Sprintf("workload %s is ready", w.GetName())) + log.Info("Workload is ready") return nil } if time.Since(startTime) > util.Timeout { - util.Ctx.Log.Info(err.Error()) + log.Info(err.Error()) return fmt.Errorf("workload %s is not ready yet before timeout of %v", w.GetName(), util.Timeout) diff --git a/e2e/deployers/subscription.go b/e2e/deployers/subscription.go index 9142a35e6..67284ed4d 100644 --- a/e2e/deployers/subscription.go +++ b/e2e/deployers/subscription.go @@ -27,8 +27,9 @@ func (s Subscription) Deploy(w workloads.Workload) error { // Address namespace/label/suffix as needed for various resources name := GetCombinedName(s, w) namespace := name + log := util.Ctx.Log.WithName(name) - util.Ctx.Log.Info("enter Deploy " + name) + log.Info("Deploying workload") // create subscription namespace err := util.CreateNamespace(util.Ctx.Hub.CtrlClient, namespace) @@ -41,42 +42,43 @@ func (s Subscription) Deploy(w workloads.Workload) error { return err } - err = CreatePlacement(name, namespace) + err = CreatePlacement(name, namespace, log) if err != nil { return err } - err = CreateSubscription(s, w) + err = CreateSubscription(s, w, log) if err != nil { return err } - return waitSubscriptionPhase(namespace, name, subscriptionv1.SubscriptionPropagated) + return waitSubscriptionPhase(namespace, name, subscriptionv1.SubscriptionPropagated, log) } // Delete Subscription, Placement, Binding func (s Subscription) Undeploy(w workloads.Workload) error { name := GetCombinedName(s, w) namespace := name + log := util.Ctx.Log.WithName(name) - util.Ctx.Log.Info("enter Undeploy " + name) + log.Info("Undeploying workload") - err := DeleteSubscription(s, w) + err := DeleteSubscription(s, w, log) if err != nil { return err } - err = DeletePlacement(name, namespace) + err = DeletePlacement(name, namespace, log) if err != nil { return err } - err = DeleteManagedClusterSetBinding(McsbName, namespace) + err = DeleteManagedClusterSetBinding(McsbName, namespace, log) if err != nil { return err } - return util.DeleteNamespace(util.Ctx.Hub.CtrlClient, namespace) + return util.DeleteNamespace(util.Ctx.Hub.CtrlClient, namespace, log) } func (s Subscription) IsWorkloadSupported(w workloads.Workload) bool { diff --git a/e2e/dractions/actions.go b/e2e/dractions/actions.go index 
bee5bb5f0..6e6168fb5 100644 --- a/e2e/dractions/actions.go +++ b/e2e/dractions/actions.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/go-logr/logr" ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/e2e/deployers" "github.com/ramendr/ramen/e2e/util" @@ -35,8 +36,9 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error { name := GetCombinedName(d, w) namespace := GetNamespace(d, w) + log := util.Ctx.Log.WithName(name) - util.Ctx.Log.Info("enter EnableProtection " + name) + log.Info("Protecting workload") drPolicyName := util.DefaultDRPolicyName appname := w.GetAppName() @@ -49,9 +51,9 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error { } clusterName := placementDecision.Status.Decisions[0].ClusterName - util.Ctx.Log.Info("got clusterName " + clusterName + " from " + placementDecision.Name) + log.Info("Workload running on " + clusterName) - util.Ctx.Log.Info("update placement " + placementName + " annotation") + log.Info("Annotating placement") err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { placement, err := getPlacement(util.Ctx.Hub.CtrlClient, namespace, placementName) @@ -70,7 +72,7 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error { return err } - util.Ctx.Log.Info("create drpc " + drpcName) + log.Info("Creating drpc") drpc := generateDRPC(name, namespace, clusterName, drPolicyName, placementName, appname) if err = createDRPC(util.Ctx.Hub.CtrlClient, drpc); err != nil { @@ -83,7 +85,7 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error { return err } - return waitDRPCReady(util.Ctx.Hub.CtrlClient, namespace, drpcName) + return waitDRPCReady(util.Ctx.Hub.CtrlClient, namespace, drpcName, log) } // remove DRPC @@ -95,30 +97,33 @@ func DisableProtection(w workloads.Workload, d deployers.Deployer) error { name := GetCombinedName(d, w) namespace := GetNamespace(d, w) + log := util.Ctx.Log.WithName(name) - util.Ctx.Log.Info("enter DisableProtection " + name) + log.Info("Unprotecting workload") drpcName := name client := util.Ctx.Hub.CtrlClient - util.Ctx.Log.Info("delete drpc " + drpcName) + log.Info("Deleting drpc") if err := deleteDRPC(client, namespace, drpcName); err != nil { return err } - return waitDRPCDeleted(client, namespace, drpcName) + return waitDRPCDeleted(client, namespace, drpcName, log) } func Failover(w workloads.Workload, d deployers.Deployer) error { name := GetCombinedName(d, w) - util.Ctx.Log.Info("enter Failover " + name) + log := util.Ctx.Log.WithName(name) + + log.Info("Failing over workload") if _, isDiscoveredApps := d.(*deployers.DiscoveredApps); isDiscoveredApps { - return FailoverDiscoveredApps(w, d) + return FailoverDiscoveredApps(w, d, log) } - return failoverRelocate(w, d, ramen.ActionFailover) + return failoverRelocate(w, d, ramen.ActionFailover, log) } // Determine DRPC @@ -127,36 +132,38 @@ func Failover(w workloads.Workload, d deployers.Deployer) error { // Update DRPC func Relocate(w workloads.Workload, d deployers.Deployer) error { name := GetCombinedName(d, w) - util.Ctx.Log.Info("enter Relocate " + name) + log := util.Ctx.Log.WithName(name) + + log.Info("Relocating workload") if _, isDiscoveredApps := d.(*deployers.DiscoveredApps); isDiscoveredApps { - return RelocateDiscoveredApps(w, d) + return RelocateDiscoveredApps(w, d, log) } - return failoverRelocate(w, d, ramen.ActionRelocate) + return failoverRelocate(w, d, ramen.ActionRelocate, log) } -func failoverRelocate(w workloads.Workload, d deployers.Deployer, 
action ramen.DRAction) error { +func failoverRelocate(w workloads.Workload, d deployers.Deployer, action ramen.DRAction, log logr.Logger) error { name := GetCombinedName(d, w) namespace := GetNamespace(d, w) drpcName := name client := util.Ctx.Hub.CtrlClient - if err := waitAndUpdateDRPC(client, namespace, drpcName, action); err != nil { + if err := waitAndUpdateDRPC(client, namespace, drpcName, action, log); err != nil { return err } if action == ramen.ActionFailover { - return waitDRPC(client, namespace, name, ramen.FailedOver) + return waitDRPC(client, namespace, name, ramen.FailedOver, log) } - return waitDRPC(client, namespace, name, ramen.Relocated) + return waitDRPC(client, namespace, name, ramen.Relocated, log) } -func waitAndUpdateDRPC(client client.Client, namespace, drpcName string, action ramen.DRAction) error { +func waitAndUpdateDRPC(client client.Client, namespace, drpcName string, action ramen.DRAction, log logr.Logger) error { // here we expect drpc should be ready before action - if err := waitDRPCReady(client, namespace, drpcName); err != nil { + if err := waitDRPCReady(client, namespace, drpcName, log); err != nil { return err } @@ -172,7 +179,7 @@ func waitAndUpdateDRPC(client client.Client, namespace, drpcName string, action return err } - util.Ctx.Log.Info("update drpc " + drpcName + " " + strings.ToLower(string(action)) + " to " + targetCluster) + log.Info("Updating drpc " + strings.ToLower(string(action)) + " to " + targetCluster) return retry.RetryOnConflict(retry.DefaultBackoff, func() error { drpc, err := getDRPC(client, namespace, drpcName) diff --git a/e2e/dractions/actionsdiscoveredapps.go b/e2e/dractions/actionsdiscoveredapps.go index 0d28906d4..0cd47683b 100644 --- a/e2e/dractions/actionsdiscoveredapps.go +++ b/e2e/dractions/actionsdiscoveredapps.go @@ -4,6 +4,7 @@ package dractions import ( + "github.com/go-logr/logr" ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/e2e/deployers" "github.com/ramendr/ramen/e2e/util" @@ -14,8 +15,9 @@ func EnableProtectionDiscoveredApps(w workloads.Workload, d deployers.Deployer) name := GetCombinedName(d, w) namespace := GetNamespace(d, w) // this namespace is in hub namespaceInDrCluster := name // this namespace is in dr clusters + log := util.Ctx.Log.WithName(name) - util.Ctx.Log.Info("enter EnableProtectionDiscoveredApps " + name) + log.Info("Protecting workload") drPolicyName := util.DefaultDRPolicyName appname := w.GetAppName() @@ -28,7 +30,7 @@ func EnableProtectionDiscoveredApps(w workloads.Workload, d deployers.Deployer) } // create placement - if err := createPlacementManagedByRamen(placementName, namespace); err != nil { + if err := createPlacementManagedByRamen(placementName, namespace, log); err != nil { return err } @@ -38,7 +40,7 @@ func EnableProtectionDiscoveredApps(w workloads.Workload, d deployers.Deployer) return err } - util.Ctx.Log.Info("create drpc " + drpcName) + log.Info("Creating drpc") clusterName := drpolicy.Spec.DRClusters[0] @@ -49,7 +51,7 @@ func EnableProtectionDiscoveredApps(w workloads.Workload, d deployers.Deployer) } // wait for drpc ready - return waitDRPCReady(util.Ctx.Hub.CtrlClient, namespace, drpcName) + return waitDRPCReady(util.Ctx.Hub.CtrlClient, namespace, drpcName, log) } // remove DRPC @@ -57,46 +59,49 @@ func EnableProtectionDiscoveredApps(w workloads.Workload, d deployers.Deployer) func DisableProtectionDiscoveredApps(w workloads.Workload, d deployers.Deployer) error { name := GetCombinedName(d, w) namespace := GetNamespace(d, w) // this namespace is in 
hub + log := util.Ctx.Log.WithName(name) - util.Ctx.Log.Info("enter DisableProtectionDiscoveredApps " + name) + log.Info("Unprotecting workload") placementName := name drpcName := name client := util.Ctx.Hub.CtrlClient - util.Ctx.Log.Info("delete drpc " + drpcName) + log.Info("Deleting drpc") if err := deleteDRPC(client, namespace, drpcName); err != nil { return err } - if err := waitDRPCDeleted(client, namespace, drpcName); err != nil { + if err := waitDRPCDeleted(client, namespace, drpcName, log); err != nil { return err } // delete placement - if err := deployers.DeletePlacement(placementName, namespace); err != nil { + if err := deployers.DeletePlacement(placementName, namespace, log); err != nil { return err } - return deployers.DeleteManagedClusterSetBinding(deployers.McsbName, namespace) + return deployers.DeleteManagedClusterSetBinding(deployers.McsbName, namespace, log) } -func FailoverDiscoveredApps(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter DRActions FailoverDiscoveredApps") +func FailoverDiscoveredApps(w workloads.Workload, d deployers.Deployer, log logr.Logger) error { + log.Info("Failing over workload") - return failoverRelocateDiscoveredApps(w, d, ramen.ActionFailover) + return failoverRelocateDiscoveredApps(w, d, ramen.ActionFailover, log) } -func RelocateDiscoveredApps(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter DRActions RelocateDiscoveredApps") +func RelocateDiscoveredApps(w workloads.Workload, d deployers.Deployer, log logr.Logger) error { + log.Info("Relocating workload") - return failoverRelocateDiscoveredApps(w, d, ramen.ActionRelocate) + return failoverRelocateDiscoveredApps(w, d, ramen.ActionRelocate, log) } // nolint:funlen,cyclop -func failoverRelocateDiscoveredApps(w workloads.Workload, d deployers.Deployer, action ramen.DRAction) error { +func failoverRelocateDiscoveredApps(w workloads.Workload, d deployers.Deployer, action ramen.DRAction, + log logr.Logger, +) error { name := GetCombinedName(d, w) namespace := GetNamespace(d, w) // this namespace is in hub namespaceInDrCluster := name // this namespace is in dr clusters @@ -121,30 +126,30 @@ func failoverRelocateDiscoveredApps(w workloads.Workload, d deployers.Deployer, return err } - if err := waitAndUpdateDRPC(client, namespace, drpcName, action); err != nil { + if err := waitAndUpdateDRPC(client, namespace, drpcName, action, log); err != nil { return err } - if err := waitDRPCProgression(client, namespace, name, ramen.ProgressionWaitOnUserToCleanUp); err != nil { + if err := waitDRPCProgression(client, namespace, name, ramen.ProgressionWaitOnUserToCleanUp, log); err != nil { return err } // delete pvc and deployment from dr cluster - util.Ctx.Log.Info("start to clean up discovered apps from " + currentCluster) + log.Info("Cleaning up discovered apps from " + currentCluster) - if err = deployers.DeleteDiscoveredApps(w, namespaceInDrCluster, currentCluster); err != nil { + if err = deployers.DeleteDiscoveredApps(w, namespaceInDrCluster, currentCluster, log); err != nil { return err } - if err = waitDRPCProgression(client, namespace, name, ramen.ProgressionCompleted); err != nil { + if err = waitDRPCProgression(client, namespace, name, ramen.ProgressionCompleted, log); err != nil { return err } - if err = waitDRPCReady(client, namespace, name); err != nil { + if err = waitDRPCReady(client, namespace, name, log); err != nil { return err } drClient := getDRClusterClient(targetCluster, drpolicy) - return deployers.WaitWorkloadHealth(drClient, 
namespaceInDrCluster, w) + return deployers.WaitWorkloadHealth(drClient, namespaceInDrCluster, w, log) } diff --git a/e2e/dractions/crud.go b/e2e/dractions/crud.go index b2867ae66..2ca8ec6a3 100644 --- a/e2e/dractions/crud.go +++ b/e2e/dractions/crud.go @@ -6,6 +6,7 @@ package dractions import ( "context" + "github.com/go-logr/logr" ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/e2e/deployers" "github.com/ramendr/ramen/e2e/util" @@ -106,7 +107,7 @@ func generateDRPC(name, namespace, clusterName, drPolicyName, placementName, app return drpc } -func createPlacementManagedByRamen(name, namespace string) error { +func createPlacementManagedByRamen(name, namespace string, log logr.Logger) error { labels := make(map[string]string) labels[deployers.AppLabelKey] = name clusterSet := []string{"default"} @@ -133,7 +134,7 @@ func createPlacementManagedByRamen(name, namespace string) error { return err } - util.Ctx.Log.Info("placement " + placement.Name + " already Exists") + log.Info("Placement already Exists") } return nil diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go index 1d30160ae..e55302540 100644 --- a/e2e/dractions/retry.go +++ b/e2e/dractions/retry.go @@ -8,6 +8,7 @@ import ( "fmt" "time" + "github.com/go-logr/logr" ramen "github.com/ramendr/ramen/api/v1alpha1" "github.com/ramendr/ramen/e2e/util" "k8s.io/apimachinery/pkg/api/errors" @@ -43,7 +44,7 @@ func waitPlacementDecision(client client.Client, namespace string, placementName } } -func waitDRPCReady(client client.Client, namespace string, drpcName string) error { +func waitDRPCReady(client client.Client, namespace string, drpcName string, log logr.Logger) error { startTime := time.Now() for { @@ -54,18 +55,18 @@ func waitDRPCReady(client client.Client, namespace string, drpcName string) erro conditionReady := checkDRPCConditions(drpc) if conditionReady && drpc.Status.LastGroupSyncTime != nil { - util.Ctx.Log.Info("drpc " + drpcName + " is ready") + log.Info("drpc is ready") return nil } if time.Since(startTime) > util.Timeout { if !conditionReady { - util.Ctx.Log.Info("drpc " + drpcName + " condition 'Available' or 'PeerReady' is not True") + log.Info("drpc condition 'Available' or 'PeerReady' is not True") } if conditionReady && drpc.Status.LastGroupSyncTime == nil { - util.Ctx.Log.Info("drpc " + drpcName + " LastGroupSyncTime is nil") + log.Info("drpc LastGroupSyncTime is nil") } return fmt.Errorf("drpc " + drpcName + " is not ready yet before timeout, fail") @@ -100,7 +101,7 @@ func checkDRPCConditions(drpc *ramen.DRPlacementControl) bool { return available && peerReady } -func waitDRPCPhase(client client.Client, namespace, name string, phase ramen.DRState) error { +func waitDRPCPhase(client client.Client, namespace, name string, phase ramen.DRState, log logr.Logger) error { startTime := time.Now() for { @@ -111,7 +112,7 @@ func waitDRPCPhase(client client.Client, namespace, name string, phase ramen.DRS currentPhase := drpc.Status.Phase if currentPhase == phase { - util.Ctx.Log.Info(fmt.Sprintf("drpc %s phase is %s", name, phase)) + log.Info(fmt.Sprintf("drpc phase is %s", phase)) return nil } @@ -159,28 +160,28 @@ func getTargetCluster(client client.Client, namespace, placementName string, drp } // first wait DRPC to have the expected phase, then check DRPC conditions -func waitDRPC(client client.Client, namespace, name string, expectedPhase ramen.DRState) error { +func waitDRPC(client client.Client, namespace, name string, expectedPhase ramen.DRState, log logr.Logger) error { // check Phase - 
if err := waitDRPCPhase(client, namespace, name, expectedPhase); err != nil { + if err := waitDRPCPhase(client, namespace, name, expectedPhase, log); err != nil { return err } // then check Conditions - return waitDRPCReady(client, namespace, name) + return waitDRPCReady(client, namespace, name, log) } -func waitDRPCDeleted(client client.Client, namespace string, name string) error { +func waitDRPCDeleted(client client.Client, namespace string, name string, log logr.Logger) error { startTime := time.Now() for { _, err := getDRPC(client, namespace, name) if err != nil { if errors.IsNotFound(err) { - util.Ctx.Log.Info("drpc " + name + " is deleted") + log.Info("drpc is deleted") return nil } - util.Ctx.Log.Info(fmt.Sprintf("error to get drpc %s: %v", name, err)) + log.Info(fmt.Sprintf("failed to get drpc: %v", err)) } if time.Since(startTime) > util.Timeout { @@ -192,7 +193,9 @@ func waitDRPCDeleted(client client.Client, namespace string, name string) error } // nolint:unparam -func waitDRPCProgression(client client.Client, namespace, name string, progression ramen.ProgressionStatus) error { +func waitDRPCProgression(client client.Client, namespace, name string, progression ramen.ProgressionStatus, + log logr.Logger, +) error { startTime := time.Now() for { @@ -203,7 +206,7 @@ func waitDRPCProgression(client client.Client, namespace, name string, progressi currentProgression := drpc.Status.Progression if currentProgression == progression { - util.Ctx.Log.Info(fmt.Sprintf("drpc %s progression is %s", name, progression)) + log.Info(fmt.Sprintf("drpc progression is %s", progression)) return nil } diff --git a/e2e/util/crud.go b/e2e/util/crud.go index ea58b310b..92886c19d 100644 --- a/e2e/util/crud.go +++ b/e2e/util/crud.go @@ -12,6 +12,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/go-logr/logr" ramen "github.com/ramendr/ramen/api/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,7 +38,7 @@ func CreateNamespace(client client.Client, namespace string) error { return nil } -func DeleteNamespace(client client.Client, namespace string) error { +func DeleteNamespace(client client.Client, namespace string, log logr.Logger) error { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, @@ -50,12 +51,12 @@ func DeleteNamespace(client client.Client, namespace string) error { return err } - Ctx.Log.Info("namespace " + namespace + " not found") + log.Info("Namespace " + namespace + " not found") return nil } - Ctx.Log.Info("waiting until namespace " + namespace + " is deleted") + log.Info("Waiting until namespace " + namespace + " is deleted") startTime := time.Now() key := types.NamespacedName{Name: namespace} @@ -66,7 +67,7 @@ func DeleteNamespace(client client.Client, namespace string) error { return err } - Ctx.Log.Info("namespace " + namespace + " deleted") + log.Info("Namespace " + namespace + " deleted") return nil } @@ -97,9 +98,9 @@ func createChannel() error { return err } - Ctx.Log.Info("channel " + GetChannelName() + " already exists") + Ctx.Log.Info("Channel " + GetChannelName() + " already exists") } else { - Ctx.Log.Info("channel " + GetChannelName() + " is created") + Ctx.Log.Info("Created channel " + GetChannelName()) } return nil @@ -119,9 +120,9 @@ func deleteChannel() error { return err } - Ctx.Log.Info("channel " + GetChannelName() + " not found") + Ctx.Log.Info("Channel " + GetChannelName() + " not found") } else { - Ctx.Log.Info("channel " + GetChannelName() + " is 
deleted") + Ctx.Log.Info("Channel " + GetChannelName() + " is deleted") } return nil @@ -142,7 +143,7 @@ func EnsureChannelDeleted() error { return err } - return DeleteNamespace(Ctx.Hub.CtrlClient, GetChannelNamespace()) + return DeleteNamespace(Ctx.Hub.CtrlClient, GetChannelNamespace(), *Ctx.Log) } // Problem: currently we must manually add an annotation to application’s namespace to make volsync work. diff --git a/e2e/workloads/deployment.go b/e2e/workloads/deployment.go index 4596f267d..25ece80e8 100644 --- a/e2e/workloads/deployment.go +++ b/e2e/workloads/deployment.go @@ -5,8 +5,8 @@ package workloads import ( "context" - "fmt" + "github.com/go-logr/logr" "github.com/ramendr/ramen/e2e/util" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/types" @@ -72,14 +72,14 @@ func (w Deployment) GetResources() error { } // Check the workload health deployed in a cluster namespace -func (w Deployment) Health(client client.Client, namespace string) error { +func (w Deployment) Health(client client.Client, namespace string, log logr.Logger) error { deploy, err := getDeployment(client, namespace, w.GetAppName()) if err != nil { return err } if deploy.Status.Replicas == deploy.Status.ReadyReplicas { - util.Ctx.Log.Info(fmt.Sprintf("deployment %s is ready", w.GetAppName())) + log.Info("Deployment is ready") return nil } diff --git a/e2e/workloads/workload.go b/e2e/workloads/workload.go index b6dfa413b..89bf783c6 100644 --- a/e2e/workloads/workload.go +++ b/e2e/workloads/workload.go @@ -3,7 +3,10 @@ package workloads -import "sigs.k8s.io/controller-runtime/pkg/client" +import ( + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/client" +) type Workload interface { Kustomize() string // Can differ based on the workload, hence part of the Workload interface @@ -16,5 +19,5 @@ type Workload interface { GetPath() string GetRevision() string - Health(client client.Client, namespace string) error + Health(client client.Client, namespace string, log logr.Logger) error }