diff --git a/Makefile b/Makefile index 34f3b0111..63499b212 100644 --- a/Makefile +++ b/Makefile @@ -189,7 +189,7 @@ test-ramenctl: ## Run ramenctl tests. $(MAKE) -C ramenctl e2e-rdr: generate manifests ## Run rdr-e2e tests. - ./e2e/rdr-e2e.sh + cd e2e && ./e2e-rdr.sh coverage: go tool cover -html=cover.out diff --git a/controllers/util/objectmeta.go b/controllers/util/objectmeta.go index c84e448b3..ed71dfb41 100644 --- a/controllers/util/objectmeta.go +++ b/controllers/util/objectmeta.go @@ -4,7 +4,9 @@ package util import ( + ramen "github.com/ramendr/ramen/api/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -23,3 +25,10 @@ func ObjectMetaEmbedded(objectMeta *metav1.ObjectMeta) metav1.ObjectMeta { func ResourceIsDeleted(obj client.Object) bool { return !obj.GetDeletionTimestamp().IsZero() } + +func ProtectedPVCNamespacedName(pvc ramen.ProtectedPVC) types.NamespacedName { + return types.NamespacedName{ + Namespace: pvc.Namespace, + Name: pvc.Name, + } +} diff --git a/controllers/volsync/vshandler.go b/controllers/volsync/vshandler.go index 96d94e237..b7ca2ef5a 100644 --- a/controllers/volsync/vshandler.go +++ b/controllers/volsync/vshandler.go @@ -70,10 +70,12 @@ type VSHandler struct { defaultCephFSCSIDriverName string destinationCopyMethod volsyncv1alpha1.CopyMethodType volumeSnapshotClassList *snapv1.VolumeSnapshotClassList + vrgInAdminNamespace bool } func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, owner metav1.Object, asyncSpec *ramendrv1alpha1.VRGAsyncSpec, defaultCephFSCSIDriverName string, copyMethod string, + adminNamespaceVRG bool, ) *VSHandler { vsHandler := &VSHandler{ ctx: ctx, @@ -83,6 +85,7 @@ func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, ow defaultCephFSCSIDriverName: defaultCephFSCSIDriverName, destinationCopyMethod: volsyncv1alpha1.CopyMethodType(copyMethod), volumeSnapshotClassList: nil, // Do not initialize until we need it + vrgInAdminNamespace: adminNamespaceVRG, } if asyncSpec != nil { @@ -95,6 +98,8 @@ func NewVSHandler(ctx context.Context, client client.Client, log logr.Logger, ow // returns replication destination only if create/update is successful and the RD is considered available. // Callers should assume getting a nil replication destination back means they should retry/requeue. 
+// +//nolint:cyclop func (v *VSHandler) ReconcileRD( rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec) (*volsyncv1alpha1.ReplicationDestination, error, ) { @@ -112,6 +117,14 @@ func (v *VSHandler) ReconcileRD( return nil, err } + if v.vrgInAdminNamespace { + // copy the secret to the namespace where the PVC is + err = v.copySecretToPVCNamespace(pskSecretName, util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) + if err != nil { + return nil, err + } + } + // Check if a ReplicationSource is still here (Can happen if transitioning from primary to secondary) // Before creating a new RD for this PVC, make sure any ReplicationSource for this PVC is cleaned up first // This avoids a scenario where we create an RD that immediately syncs with an RS that still exists locally @@ -183,15 +196,17 @@ func (v *VSHandler) createOrUpdateRD( rd := &volsyncv1alpha1.ReplicationDestination{ ObjectMeta: metav1.ObjectMeta{ Name: getReplicationDestinationName(rdSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, rd, func() error { - if err := ctrl.SetControllerReference(v.owner, rd, v.client.Scheme()); err != nil { - l.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, rd, v.client.Scheme()); err != nil { + l.Error(err, "unable to set controller reference") - return fmt.Errorf("%w", err) + return fmt.Errorf("%w", err) + } } util.AddLabel(rd, VRGOwnerLabel, v.owner.GetName()) @@ -223,14 +238,10 @@ func (v *VSHandler) createOrUpdateRD( return rd, nil } -func (v *VSHandler) isPVCInUseByNonRDPod(pvcName string) (bool, error) { +func (v *VSHandler) isPVCInUseByNonRDPod(pvcNamespacedName types.NamespacedName) (bool, error) { rd := &volsyncv1alpha1.ReplicationDestination{} - err := v.client.Get(v.ctx, - types.NamespacedName{ - Name: getReplicationDestinationName(pvcName), - Namespace: v.owner.GetNamespace(), - }, rd) + err := v.client.Get(v.ctx, pvcNamespacedName, rd) // IF RD is Found, then no more checks are needed. We'll assume that the RD // was created when the PVC was Not in use. @@ -241,7 +252,7 @@ func (v *VSHandler) isPVCInUseByNonRDPod(pvcName string) (bool, error) { } // PVC must not be in use - pvcInUse, err := v.pvcExistsAndInUse(pvcName, false) + pvcInUse, err := v.pvcExistsAndInUse(pvcNamespacedName, false) if err != nil { return false, err } @@ -259,7 +270,7 @@ func (v *VSHandler) isPVCInUseByNonRDPod(pvcName string) (bool, error) { // Callers should assume getting a nil replication source back means they should retry/requeue. // Returns true/false if final sync is complete, and also returns an RS if one was reconciled.
// -//nolint:cyclop +//nolint:cyclop,funlen func (v *VSHandler) ReconcileRS(rsSpec ramendrv1alpha1.VolSyncReplicationSourceSpec, runFinalSync bool) (bool /* finalSyncComplete */, *volsyncv1alpha1.ReplicationSource, error, ) { @@ -280,6 +291,14 @@ func (v *VSHandler) ReconcileRS(rsSpec ramendrv1alpha1.VolSyncReplicationSourceS return false, nil, err } + if v.vrgInAdminNamespace { + // copy the secret to the namespace where the PVC is + err = v.copySecretToPVCNamespace(pskSecretName, util.ProtectedPVCNamespacedName(rsSpec.ProtectedPVC)) + if err != nil { + return false, nil, err + } + } + // Check if a ReplicationDestination is still here (Can happen if transitioning from secondary to primary) // Before creating a new RS for this PVC, make sure any ReplicationDestination for this PVC is cleaned up first // This avoids a scenario where we create an RS that immediately connects back to an RD that still exists locally @@ -292,7 +311,7 @@ func (v *VSHandler) ReconcileRS(rsSpec ramendrv1alpha1.VolSyncReplicationSourceS pvcOk, err := v.validatePVCBeforeRS(rsSpec, runFinalSync) if !pvcOk || err != nil { // Return the replicationSource if it already exists - existingRS, getRSErr := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name)) + existingRS, getRSErr := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name), rsSpec.ProtectedPVC.Namespace) if getRSErr != nil { return false, nil, err } @@ -333,7 +352,7 @@ func (v *VSHandler) validatePVCBeforeRS(rsSpec ramendrv1alpha1.VolSyncReplicatio if runFinalSync { // If runFinalSync, check the PVC and make sure it's not mounted to a pod // as we want the app to be quiesced/removed before running final sync - pvcIsMounted, err := v.pvcExistsAndInUse(rsSpec.ProtectedPVC.Name, false) + pvcIsMounted, err := v.pvcExistsAndInUse(util.ProtectedPVCNamespacedName(rsSpec.ProtectedPVC), false) if err != nil { return false, err } @@ -348,12 +367,13 @@ func (v *VSHandler) validatePVCBeforeRS(rsSpec ramendrv1alpha1.VolSyncReplicatio // Not running final sync - if we have not yet created an RS for this PVC, then make sure a pod has mounted // the PVC and is in "Running" state before attempting to create an RS. // This is a best effort to confirm the app that is using the PVC is started before trying to replicate the PVC. - _, err := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name)) + _, err := v.getRS(getReplicationSourceName(rsSpec.ProtectedPVC.Name), rsSpec.ProtectedPVC.Namespace) if err != nil && kerrors.IsNotFound(err) { l.Info("ReplicationSource does not exist yet. 
" + "validating that the PVC to be protected is in use by a ready pod ...") // RS does not yet exist - consider PVC is ok if it's mounted and in use by running pod - inUseByReadyPod, err := v.pvcExistsAndInUse(rsSpec.ProtectedPVC.Name, true /* Check mounting pod is Ready */) + inUseByReadyPod, err := v.pvcExistsAndInUse(util.ProtectedPVCNamespacedName(rsSpec.ProtectedPVC), + true /* Check mounting pod is Ready */) if err != nil { return false, err } @@ -400,7 +420,7 @@ func (v *VSHandler) cleanupAfterRSFinalSync(rsSpec ramendrv1alpha1.VolSyncReplic v.log.Info("Cleanup after final sync", "pvcName", rsSpec.ProtectedPVC.Name) - return util.DeletePVC(v.ctx, v.client, rsSpec.ProtectedPVC.Name, v.owner.GetNamespace(), v.log) + return util.DeletePVC(v.ctx, v.client, rsSpec.ProtectedPVC.Name, rsSpec.ProtectedPVC.Namespace, v.log) } //nolint:funlen @@ -427,20 +447,22 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo // Remote service address created for the ReplicationDestination on the secondary // The secondary namespace will be the same as primary namespace so use the vrg.Namespace - remoteAddress := getRemoteServiceNameForRDFromPVCName(rsSpec.ProtectedPVC.Name, v.owner.GetNamespace()) + remoteAddress := getRemoteServiceNameForRDFromPVCName(rsSpec.ProtectedPVC.Name, rsSpec.ProtectedPVC.Namespace) rs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getReplicationSourceName(rsSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rsSpec.ProtectedPVC.Namespace, }, } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, rs, func() error { - if err := ctrl.SetControllerReference(v.owner, rs, v.client.Scheme()); err != nil { - l.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, rs, v.client.Scheme()); err != nil { + l.Error(err, "unable to set controller reference") - return fmt.Errorf("%w", err) + return fmt.Errorf("%w", err) + } } util.AddLabel(rs, VRGOwnerLabel, v.owner.GetName()) @@ -492,9 +514,9 @@ func (v *VSHandler) createOrUpdateRS(rsSpec ramendrv1alpha1.VolSyncReplicationSo return rs, nil } -func (v *VSHandler) PreparePVC(pvcName string, prepFinalSync, copyMethodDirect bool) error { +func (v *VSHandler) PreparePVC(pvcNamespacedName types.NamespacedName, prepFinalSync, copyMethodDirect bool) error { if prepFinalSync || copyMethodDirect { - prepared, err := v.TakePVCOwnership(pvcName) + prepared, err := v.TakePVCOwnership(pvcNamespacedName) if err != nil || !prepared { return fmt.Errorf("waiting to take pvc ownership (%w), prepFinalSync: %t, Direct: %t", err, prepFinalSync, copyMethodDirect) @@ -506,11 +528,11 @@ func (v *VSHandler) PreparePVC(pvcName string, prepFinalSync, copyMethodDirect b // TakePVCOwnership adds do-not-delete annotation to indicate that ACM should not delete/cleanup this pvc // when the appsub is removed and adds VRG as owner so the PVC is garbage collected when the VRG is deleted. 
-func (v *VSHandler) TakePVCOwnership(pvcName string) (bool, error) { - l := v.log.WithValues("pvcName", pvcName) +func (v *VSHandler) TakePVCOwnership(pvcNamespacedName types.NamespacedName) (bool, error) { + l := v.log.WithValues("pvc", pvcNamespacedName) // Confirm PVC exists and add our VRG as ownerRef - pvc, err := v.validatePVCAndAddVRGOwnerRef(pvcName) + pvc, err := v.validatePVCAndAddVRGOwnerRef(pvcNamespacedName) if err != nil { l.Error(err, "unable to validate PVC or add ownership") @@ -531,11 +553,10 @@ func (v *VSHandler) TakePVCOwnership(pvcName string) (bool, error) { // If inUsePodMustBeReady is true, will only return true if the pod mounting the PVC is in Ready state // If inUsePodMustBeReady is false, will run an additional volume attachment check to make sure the PV underlying // the PVC is really detached (i.e. I/O operations complete) and therefore we can assume, quiesced. -func (v *VSHandler) pvcExistsAndInUse(pvcName string, inUsePodMustBeReady bool) (bool, error) { - pvcNamespacedName := types.NamespacedName{Namespace: v.owner.GetNamespace(), Name: pvcName} +func (v *VSHandler) pvcExistsAndInUse(pvcNamespacedName types.NamespacedName, inUsePodMustBeReady bool) (bool, error) { log := v.log.WithValues("pvc", pvcNamespacedName.String()) - pvc, err := v.getPVC(pvcName) + pvc, err := v.getPVC(pvcNamespacedName) if err != nil { if kerrors.IsNotFound(err) { log.Info("PVC not found") @@ -558,14 +579,10 @@ func (v *VSHandler) pvcExistsAndInUse(pvcName string, inUsePodMustBeReady bool) return util.IsPVAttachedToNode(v.ctx, v.client, v.log, pvc) } -func (v *VSHandler) getPVC(pvcName string) (*corev1.PersistentVolumeClaim, error) { +func (v *VSHandler) getPVC(pvcNamespacedName types.NamespacedName) (*corev1.PersistentVolumeClaim, error) { pvc := &corev1.PersistentVolumeClaim{} - err := v.client.Get(v.ctx, - types.NamespacedName{ - Name: pvcName, - Namespace: v.owner.GetNamespace(), - }, pvc) + err := v.client.Get(v.ctx, pvcNamespacedName, pvc) if err != nil { return nil, fmt.Errorf("%w", err) } @@ -575,13 +592,15 @@ func (v *VSHandler) getPVC(pvcName string) (*corev1.PersistentVolumeClaim, error // Adds owner ref and ACM "do-not-delete" annotation to indicate that when the appsub is removed, ACM // should not cleanup this PVC - we want it left behind so we can run a final sync -func (v *VSHandler) validatePVCAndAddVRGOwnerRef(pvcName string) (*corev1.PersistentVolumeClaim, error) { - pvc, err := v.getPVC(pvcName) +func (v *VSHandler) validatePVCAndAddVRGOwnerRef(pvcNamespacedName types.NamespacedName) ( + *corev1.PersistentVolumeClaim, error, +) { + pvc, err := v.getPVC(pvcNamespacedName) if err != nil { return nil, err } - v.log.Info("PVC exists", "pvcName", pvcName) + v.log.Info("PVC exists", "pvcName", pvcNamespacedName.Name, "pvcNamespaceName", pvcNamespacedName.Namespace) // Add annotation to indicate that ACM should not delete/cleanup this pvc when the appsub is removed // and add VRG as owner @@ -590,7 +609,7 @@ func (v *VSHandler) validatePVCAndAddVRGOwnerRef(pvcName string) (*corev1.Persis return nil, err } - v.log.V(1).Info("PVC validated", "pvc name", pvcName) + v.log.V(1).Info("PVC validated", "pvcName", pvcNamespacedName.Name, "pvcNamespaceName", pvcNamespacedName.Namespace) return pvc, nil } @@ -630,13 +649,63 @@ func (v *VSHandler) validateSecretAndAddVRGOwnerRef(secretName string) (bool, er return true, nil } -func (v *VSHandler) getRS(name string) (*volsyncv1alpha1.ReplicationSource, error) { +func (v *VSHandler) copySecretToPVCNamespace(secretName string, 
pvcNamespacedName types.NamespacedName) error { + secret := &corev1.Secret{} + + err := v.client.Get(v.ctx, + types.NamespacedName{ + Name: secretName, + Namespace: pvcNamespacedName.Namespace, + }, secret) + if err != nil && !kerrors.IsNotFound(err) { + v.log.Error(err, "Failed to get secret", "secretName", secretName) + + return fmt.Errorf("error getting secret (%w)", err) + } + + if err == nil { + v.log.Info("Secret already exists in the PVC namespace", "secretName", secretName, "pvcNamespace", + pvcNamespacedName.Namespace) + + return nil + } + + v.log.Info("volsync secret not found in the pvc namespace, will create it", "secretName", secretName, + "pvcNamespace", pvcNamespacedName.Namespace) + + err = v.client.Get(v.ctx, + types.NamespacedName{ + Name: secretName, + Namespace: v.owner.GetNamespace(), + }, secret) + if err != nil { + return fmt.Errorf("error getting secret from the admin namespace (%w)", err) + } + + secretCopy := secret.DeepCopy() + + secretCopy.ObjectMeta = metav1.ObjectMeta{ + Name: secretName, + Namespace: pvcNamespacedName.Namespace, + Labels: secret.Labels, + Annotations: secret.Annotations, + } + + err = v.client.Create(v.ctx, secretCopy) + if err != nil { + return fmt.Errorf("error creating secret (%w)", err) + } + + return nil +} + +func (v *VSHandler) getRS(name, namespace string) (*volsyncv1alpha1.ReplicationSource, error) { rs := &volsyncv1alpha1.ReplicationSource{} err := v.client.Get(v.ctx, types.NamespacedName{ Name: name, - Namespace: v.owner.GetNamespace(), + Namespace: namespace, }, rs) if err != nil { return nil, fmt.Errorf("%w", err) @@ -700,7 +769,7 @@ func (v *VSHandler) DeleteRD(pvcName string) error { //nolint:gocognit func (v *VSHandler) deleteLocalRDAndRS(rd *volsyncv1alpha1.ReplicationDestination) error { - latestRDImage, err := v.getRDLatestImage(rd.GetName()) + latestRDImage, err := v.getRDLatestImage(rd.GetName(), rd.GetNamespace()) if err != nil { return err } @@ -710,7 +779,7 @@ func (v *VSHandler) deleteLocalRDAndRS(rd *volsyncv1alpha1.ReplicationDestinatio lrs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rd.GetName()), - Namespace: v.owner.GetNamespace(), + Namespace: rd.GetNamespace(), }, } @@ -720,7 +789,7 @@ func (v *VSHandler) deleteLocalRDAndRS(rd *volsyncv1alpha1.ReplicationDestinatio }, lrs) if err != nil { if kerrors.IsNotFound(err) { - return v.deleteLocalRD(getLocalReplicationName(rd.GetName())) + return v.deleteLocalRD(getLocalReplicationName(rd.GetName()), rd.GetNamespace()) } return err @@ -872,7 +941,7 @@ func (v *VSHandler) listByOwner(list client.ObjectList) error { func (v *VSHandler) EnsurePVCfromRD(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, failoverAction bool, ) error { - latestImage, err := v.getRDLatestImage(rdSpec.ProtectedPVC.Name) + latestImage, err := v.getRDLatestImage(rdSpec.ProtectedPVC.Name, rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -896,7 +965,7 @@ func (v *VSHandler) EnsurePVCfromRD(rdSpec ramendrv1alpha1.VolSyncReplicationDes return v.validateSnapshotAndEnsurePVC(rdSpec, *vsImageRef, failoverAction) } -//nolint:cyclop +//nolint:cyclop,funlen,gocognit func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, ) error { @@ -910,7 +979,7 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, return fmt.Errorf("capacity must be provided %v", rdSpec.ProtectedPVC) } - pvc, err := v.getPVC(rdSpec.ProtectedPVC.Name) + pvc, err := 
v.getPVC(util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) if err != nil && !kerrors.IsNotFound(err) { return err } @@ -922,13 +991,15 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, pvc = &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: rdSpec.ProtectedPVC.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } op, err := ctrlutil.CreateOrUpdate(ctx, v.client, pvc, func() error { - if err := ctrl.SetControllerReference(v.owner, pvc, v.client.Scheme()); err != nil { - return fmt.Errorf("failed to set controller reference %w", err) + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, pvc, v.client.Scheme()); err != nil { + return fmt.Errorf("failed to set controller reference %w", err) + } } if pvc.CreationTimestamp.IsZero() { @@ -963,7 +1034,7 @@ func (v *VSHandler) EnsurePVCforDirectCopy(ctx context.Context, func (v *VSHandler) validateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, snapshotRef corev1.TypedLocalObjectReference, failoverAction bool, ) error { - snap, err := v.validateAndProtectSnapshot(snapshotRef) + snap, err := v.validateAndProtectSnapshot(snapshotRef, rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -976,7 +1047,7 @@ func (v *VSHandler) validateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncR pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: rdSpec.ProtectedPVC.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -1007,7 +1078,7 @@ func (v *VSHandler) validateSnapshotAndEnsurePVC(rdSpec ramendrv1alpha1.VolSyncR } } - pvc, err := v.getPVC(rdSpec.ProtectedPVC.Name) + pvc, err := v.getPVC(util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) if err != nil { return err } @@ -1028,7 +1099,7 @@ func (v *VSHandler) rollbackToLastSnapshot(rdSpec ramendrv1alpha1.VolSyncReplica v.log.Info(fmt.Sprintf("Rollback to the last snapshot %s for pvc %s", snapshotRef.Name, rdSpec.ProtectedPVC.Name)) // 1. Pause the main RD. Any inprogress sync will be terminated. - rd, err := v.pauseRD(getReplicationDestinationName(rdSpec.ProtectedPVC.Name)) + rd, err := v.pauseRD(getReplicationDestinationName(rdSpec.ProtectedPVC.Name), rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -1039,7 +1110,7 @@ func (v *VSHandler) rollbackToLastSnapshot(rdSpec ramendrv1alpha1.VolSyncReplica return err } - lrd, err := v.getRD(getLocalReplicationName(rdSpec.ProtectedPVC.Name)) + lrd, err := v.getRD(getLocalReplicationName(rdSpec.ProtectedPVC.Name), rdSpec.ProtectedPVC.Namespace) if err != nil { return err } @@ -1064,7 +1135,7 @@ func (v *VSHandler) rollbackToLastSnapshot(rdSpec ramendrv1alpha1.VolSyncReplica // Now pause LocalRD so that a new pod does not start and uses the PVC. // At this point, we want only the app to use the PVC. 
- _, err = v.pauseRD(lrd.GetName()) + _, err = v.pauseRD(lrd.GetName(), lrd.GetNamespace()) if err != nil { return err } @@ -1087,7 +1158,7 @@ func (v *VSHandler) ensurePVCFromSnapshot(rdSpec ramendrv1alpha1.VolSyncReplicat pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: rdSpec.ProtectedPVC.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -1166,12 +1237,13 @@ func (v *VSHandler) ensurePVCFromSnapshot(rdSpec ramendrv1alpha1.VolSyncReplicat // adds VolSync "do-not-delete" label to indicate volsync should not cleanup this snapshot func (v *VSHandler) validateAndProtectSnapshot( volumeSnapshotRef corev1.TypedLocalObjectReference, + volumeSnapshotNamespace string, ) (*snapv1.VolumeSnapshot, error) { volSnap := &snapv1.VolumeSnapshot{} err := v.client.Get(v.ctx, types.NamespacedName{ Name: volumeSnapshotRef.Name, - Namespace: v.owner.GetNamespace(), + Namespace: volumeSnapshotNamespace, }, volSnap) if err != nil { v.log.Error(err, "Unable to get VolumeSnapshot", "volumeSnapshotRef", volumeSnapshotRef) @@ -1185,7 +1257,6 @@ func (v *VSHandler) validateAndProtectSnapshot( AddOwner(v.owner, v.client.Scheme()). AddLabel(VolSyncDoNotDeleteLabel, VolSyncDoNotDeleteLabelVal). Update(v.ctx, v.client) - if err != nil { return nil, fmt.Errorf("failed to add owner/label to snapshot %s (%w)", volSnap.GetName(), err) } @@ -1197,12 +1268,16 @@ func (v *VSHandler) validateAndProtectSnapshot( func (v *VSHandler) addAnnotationAndVRGOwnerRefAndUpdate(obj client.Object, annotationName, annotationValue string, -) error { +) (err error) { + var ownerRefUpdated bool + annotationsUpdated := util.AddAnnotation(obj, annotationName, annotationValue) - ownerRefUpdated, err := util.AddOwnerReference(obj, v.owner, v.client.Scheme()) // VRG as owner - if err != nil { - return err + if !v.vrgInAdminNamespace { + ownerRefUpdated, err = util.AddOwnerReference(obj, v.owner, v.client.Scheme()) // VRG as owner + if err != nil { + return err + } } if annotationsUpdated || ownerRefUpdated { @@ -1459,7 +1534,7 @@ func ConvertSchedulingIntervalToCronSpec(schedulingInterval string) (*string, er return &cronSpec, nil } -func (v *VSHandler) IsRSDataProtected(pvcName string) (bool, error) { +func (v *VSHandler) IsRSDataProtected(pvcName, pvcNamespace string) (bool, error) { l := v.log.WithValues("pvcName", pvcName) // Get RD instance @@ -1468,7 +1543,7 @@ func (v *VSHandler) IsRSDataProtected(pvcName string) (bool, error) { err := v.client.Get(v.ctx, types.NamespacedName{ Name: getReplicationSourceName(pvcName), - Namespace: v.owner.GetNamespace(), + Namespace: pvcNamespace, }, rs) if err != nil { if !kerrors.IsNotFound(err) { @@ -1493,8 +1568,8 @@ func isRSLastSyncTimeReady(rsStatus *volsyncv1alpha1.ReplicationSourceStatus) bo return false } -func (v *VSHandler) getRDLatestImage(pvcName string) (*corev1.TypedLocalObjectReference, error) { - rd, err := v.getRD(pvcName) +func (v *VSHandler) getRDLatestImage(pvcName, pvcNamespace string) (*corev1.TypedLocalObjectReference, error) { + rd, err := v.getRD(pvcName, pvcNamespace) if err != nil || rd == nil { return nil, err } @@ -1508,8 +1583,8 @@ func (v *VSHandler) getRDLatestImage(pvcName string) (*corev1.TypedLocalObjectRe } // Returns true if at least one sync has completed (we'll consider this "data protected") -func (v *VSHandler) IsRDDataProtected(pvcName string) (bool, error) { - latestImage, err := v.getRDLatestImage(pvcName) +func (v *VSHandler) IsRDDataProtected(pvcName, pvcNamespace string) (bool, error) { + 
latestImage, err := v.getRDLatestImage(pvcName, pvcNamespace) if err != nil { return false, err } @@ -1532,7 +1607,7 @@ func (v *VSHandler) PrecreateDestPVCIfEnabled(rdSpec ramendrv1alpha1.VolSyncRepl } // PVC must not be in-use before creating the RD - inUse, err := v.isPVCInUseByNonRDPod(rdSpec.ProtectedPVC.Name) + inUse, err := v.isPVCInUseByNonRDPod(util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) if err != nil { return nil, err } @@ -1541,10 +1616,12 @@ func (v *VSHandler) PrecreateDestPVCIfEnabled(rdSpec ramendrv1alpha1.VolSyncRepl // on this cluster). That race condition will be ignored. That would be a user error to deploy the // same app in the same namespace and on the destination cluster... if inUse { - return nil, fmt.Errorf("pvc %v is mounted by others. Checking later", rdSpec.ProtectedPVC.Name) + return nil, fmt.Errorf("pvc %v is mounted by others. Checking later", + util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC)) } - v.log.Info(fmt.Sprintf("Using App PVC %s for syncing directly to it", rdSpec.ProtectedPVC.Name)) + v.log.Info(fmt.Sprintf("Using App PVC %s for syncing directly to it", + util.ProtectedPVCNamespacedName(rdSpec.ProtectedPVC))) // Using the application PVC for syncing from source to destination and save a snapshot // everytime a sync is successful return &rdSpec.ProtectedPVC.Name, nil @@ -1650,6 +1727,7 @@ func (v *VSHandler) reconcileLocalReplication(rd *volsyncv1alpha1.ReplicationDes return lrd, lrs, nil } +//nolint:funlen func (v *VSHandler) reconcileLocalRD(rdSpec ramendrv1alpha1.VolSyncReplicationDestinationSpec, pskSecretName string) (*volsyncv1alpha1.ReplicationDestination, error, ) { @@ -1658,7 +1736,7 @@ func (v *VSHandler) reconcileLocalRD(rdSpec ramendrv1alpha1.VolSyncReplicationDe lrd := &volsyncv1alpha1.ReplicationDestination{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rdSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rdSpec.ProtectedPVC.Namespace, }, } @@ -1668,10 +1746,12 @@ func (v *VSHandler) reconcileLocalRD(rdSpec ramendrv1alpha1.VolSyncReplicationDe } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, lrd, func() error { - if err := ctrl.SetControllerReference(v.owner, lrd, v.client.Scheme()); err != nil { - v.log.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, lrd, v.client.Scheme()); err != nil { + v.log.Error(err, "unable to set controller reference") - return err + return err + } } util.AddLabel(lrd, VRGOwnerLabel, v.owner.GetName()) @@ -1739,15 +1819,17 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, lrs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rsSpec.ProtectedPVC.Name), - Namespace: v.owner.GetNamespace(), + Namespace: rsSpec.ProtectedPVC.Namespace, }, } op, err := ctrlutil.CreateOrUpdate(v.ctx, v.client, lrs, func() error { - if err := ctrl.SetControllerReference(v.owner, lrs, v.client.Scheme()); err != nil { - v.log.Error(err, "unable to set controller reference") + if !v.vrgInAdminNamespace { + if err := ctrl.SetControllerReference(v.owner, lrs, v.client.Scheme()); err != nil { + v.log.Error(err, "unable to set controller reference") - return err + return err + } } util.AddLabel(lrs, VRGOwnerLabel, v.owner.GetName()) @@ -1781,13 +1863,13 @@ func (v *VSHandler) reconcileLocalRS(rd *volsyncv1alpha1.ReplicationDestination, func (v *VSHandler) cleanupLocalResources(lrs *volsyncv1alpha1.ReplicationSource) error { 
// delete the snapshot taken by local RD - err := v.deleteSnapshot(v.ctx, v.client, lrs.Spec.Trigger.Manual, v.owner.GetNamespace(), v.log) + err := v.deleteSnapshot(v.ctx, v.client, lrs.Spec.Trigger.Manual, lrs.GetNamespace(), v.log) if err != nil { return err } // delete RO PVC created for localRS - err = util.DeletePVC(v.ctx, v.client, lrs.Spec.SourcePVC, v.owner.GetNamespace(), v.log) + err = util.DeletePVC(v.ctx, v.client, lrs.Spec.SourcePVC, lrs.GetNamespace(), v.log) if err != nil { return err } @@ -1798,14 +1880,14 @@ func (v *VSHandler) cleanupLocalResources(lrs *volsyncv1alpha1.ReplicationSource } // Delete the localRD. The name of the localRD is the same as the name of the localRS - return v.deleteLocalRD(lrs.GetName()) + return v.deleteLocalRD(lrs.GetName(), lrs.GetNamespace()) } -func (v *VSHandler) deleteLocalRD(lrdName string) error { +func (v *VSHandler) deleteLocalRD(lrdName, lrdNamespace string) error { lrd := &volsyncv1alpha1.ReplicationDestination{ ObjectMeta: metav1.ObjectMeta{ Name: lrdName, - Namespace: v.owner.GetNamespace(), + Namespace: lrdNamespace, }, } @@ -1826,7 +1908,7 @@ func (v *VSHandler) deleteLocalRD(lrdName string) error { func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, ) (*corev1.PersistentVolumeClaim, error) { - latestImage, err := v.getRDLatestImage(rd.GetName()) + latestImage, err := v.getRDLatestImage(rd.GetName(), rd.GetNamespace()) if err != nil { return nil, err } @@ -1850,7 +1932,7 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, lrs := &volsyncv1alpha1.ReplicationSource{ ObjectMeta: metav1.ObjectMeta{ Name: getLocalReplicationName(rd.GetName()), - Namespace: v.owner.GetNamespace(), + Namespace: rd.GetNamespace(), }, } @@ -1866,7 +1948,7 @@ func (v *VSHandler) setupLocalRS(rd *volsyncv1alpha1.ReplicationDestination, } } - snap, err := v.validateAndProtectSnapshot(*vsImageRef) + snap, err := v.validateAndProtectSnapshot(*vsImageRef, lrs.Namespace) if err != nil { return nil, err } @@ -1890,7 +1972,7 @@ func (v *VSHandler) createReadOnlyPVCFromSnapshot(rd *volsyncv1alpha1.Replicatio pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: snapshotRef.Name, - Namespace: v.owner.GetNamespace(), + Namespace: rd.GetNamespace(), }, } @@ -1972,7 +2054,7 @@ func (v *VSHandler) deleteSnapshot(ctx context.Context, return nil } -func (v *VSHandler) getRD(pvcName string) (*volsyncv1alpha1.ReplicationDestination, error) { +func (v *VSHandler) getRD(pvcName, pvcNamespace string) (*volsyncv1alpha1.ReplicationDestination, error) { l := v.log.WithValues("pvcName", pvcName) // Get RD instance @@ -1981,7 +2063,7 @@ func (v *VSHandler) getRD(pvcName string) (*volsyncv1alpha1.ReplicationDestinati err := v.client.Get(v.ctx, types.NamespacedName{ Name: getReplicationDestinationName(pvcName), - Namespace: v.owner.GetNamespace(), + Namespace: pvcNamespace, }, rdInst) if err != nil { if !kerrors.IsNotFound(err) { @@ -1998,8 +2080,8 @@ func (v *VSHandler) getRD(pvcName string) (*volsyncv1alpha1.ReplicationDestinati return rdInst, nil } -func (v *VSHandler) pauseRD(rdName string) (*volsyncv1alpha1.ReplicationDestination, error) { - rd, err := v.getRD(rdName) +func (v *VSHandler) pauseRD(rdName, rdNamespace string) (*volsyncv1alpha1.ReplicationDestination, error) { + rd, err := v.getRD(rdName, rdNamespace) if err != nil || rd == nil { return nil, err } diff --git a/controllers/volsync/vshandler_test.go b/controllers/volsync/vshandler_test.go index 6ffa0a105..001bc6f98 100644 --- 
a/controllers/volsync/vshandler_test.go +++ b/controllers/volsync/vshandler_test.go @@ -87,7 +87,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { var vsHandler *volsync.VSHandler BeforeEach(func() { - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot", false) }) It("GetVolumeSnapshotClasses() should find all volume snapshot classes", func() { @@ -116,7 +116,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }, } - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot", false) }) It("GetVolumeSnapshotClasses() should find matching volume snapshot classes", func() { @@ -159,7 +159,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { }, } - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, "none", "Snapshot", false) }) It("GetVolumeSnapshotClasses() should find matching volume snapshot classes", func() { @@ -216,7 +216,7 @@ var _ = Describe("VolSync Handler - Volume Replication Class tests", func() { // Initialize a vshandler vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, nil, asyncSpec, - "openshift-storage.cephfs.csi.ceph.com", "Snapshot") + "openshift-storage.cephfs.csi.ceph.com", "Snapshot", false) }) JustBeforeEach(func() { @@ -431,7 +431,7 @@ var _ = Describe("VolSync_Handler", func() { Expect(ownerCm.GetName()).NotTo(BeEmpty()) owner = ownerCm - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Snapshot") + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Snapshot", false) }) AfterEach(func() { @@ -465,6 +465,7 @@ var _ = Describe("VolSync_Handler", func() { JustBeforeEach(func() { // Run ReconcileRD var err error + rdSpec.ProtectedPVC.Namespace = testNamespace.GetName() returnedRD, err = vsHandler.ReconcileRD(rdSpec) Expect(err).ToNot(HaveOccurred()) }) @@ -483,6 +484,7 @@ var _ = Describe("VolSync_Handler", func() { Context("When the psk secret for volsync exists (will be pushed down by drpc from hub", func() { var dummyPSKSecret *corev1.Secret JustBeforeEach(func() { + rdSpec.ProtectedPVC.Namespace = testNamespace.GetName() // Create a dummy volsync psk secret so the reconcile can proceed properly dummyPSKSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -653,7 +655,8 @@ var _ = Describe("VolSync_Handler", func() { var vsHandler *volsync.VSHandler BeforeEach(func() { - vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Direct") + rdSpec.ProtectedPVC.Namespace = testNamespace.GetName() + vsHandler = volsync.NewVSHandler(ctx, k8sClient, logger, owner, asyncSpec, "none", "Direct", false) }) It("PrecreateDestPVCIfEnabled() should return CopyMethod Snapshot and App PVC name", func() { @@ -703,6 +706,7 @@ var _ = Describe("VolSync_Handler", func() { // Run ReconcileRD var err error var finalSyncCompl bool + rsSpec.ProtectedPVC.Namespace = testNamespace.GetName() finalSyncCompl, returnedRS, err = vsHandler.ReconcileRS(rsSpec, false) Expect(err).ToNot(HaveOccurred()) Expect(finalSyncCompl).To(BeFalse()) @@ -722,6 +726,7 @@ var _ = Describe("VolSync_Handler", func() { Context("When the psk secret for 
volsync exists (will be pushed down by drpc from hub", func() { var dummyPSKSecret *corev1.Secret JustBeforeEach(func() { + rsSpec.ProtectedPVC.Namespace = testNamespace.GetName() // Create a dummy volsync psk secret so the reconcile can proceed properly dummyPSKSecret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -1102,6 +1107,7 @@ var _ = Describe("VolSync_Handler", func() { rdSpec = ramendrv1alpha1.VolSyncReplicationDestinationSpec{ ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: pvcName, + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &testStorageClassName, Resources: corev1.VolumeResourceRequirements{ @@ -1438,6 +1444,7 @@ var _ = Describe("VolSync_Handler", func() { rdSpec := ramendrv1alpha1.VolSyncReplicationDestinationSpec{ ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: pvcNamePrefix + strconv.Itoa(i), + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &testStorageClassName, Resources: corev1.VolumeResourceRequirements{ @@ -1461,12 +1468,13 @@ var _ = Describe("VolSync_Handler", func() { Expect(k8sClient.Create(ctx, otherOwnerCm)).To(Succeed()) Expect(otherOwnerCm.GetName()).NotTo(BeEmpty()) otherVSHandler := volsync.NewVSHandler(ctx, k8sClient, logger, otherOwnerCm, asyncSpec, - "none", "Snapshot") + "none", "Snapshot", false) for i := 0; i < 2; i++ { otherOwnerRdSpec := ramendrv1alpha1.VolSyncReplicationDestinationSpec{ ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: pvcNamePrefixOtherOwner + strconv.Itoa(i), + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &testStorageClassName, Resources: corev1.VolumeResourceRequirements{ @@ -1632,6 +1640,7 @@ var _ = Describe("VolSync_Handler", func() { rsSpec := ramendrv1alpha1.VolSyncReplicationSourceSpec{ ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: pvcNamePrefix + strconv.Itoa(i), + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &testStorageClassName, }, @@ -1651,12 +1660,13 @@ var _ = Describe("VolSync_Handler", func() { Expect(k8sClient.Create(ctx, otherOwnerCm)).To(Succeed()) Expect(otherOwnerCm.GetName()).NotTo(BeEmpty()) otherVSHandler := volsync.NewVSHandler(ctx, k8sClient, logger, otherOwnerCm, asyncSpec, - "none", "Snapshot") + "none", "Snapshot", false) for i := 0; i < 2; i++ { otherOwnerRsSpec := ramendrv1alpha1.VolSyncReplicationSourceSpec{ ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: pvcNamePrefixOtherOwner + strconv.Itoa(i), + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &testStorageClassName, }, @@ -1764,7 +1774,11 @@ var _ = Describe("VolSync_Handler", func() { Describe("Prepare PVC for final sync", func() { Context("When the PVC does not exist", func() { It("Should assume preparationForFinalSync is complete", func() { - pvcPreparationComplete, err := vsHandler.TakePVCOwnership("this-pvc-does-not-exist") + pvcNamespacedName := types.NamespacedName{ + Name: "this-pvc-does-not-exist", + Namespace: testNamespace.GetName(), + } + pvcPreparationComplete, err := vsHandler.TakePVCOwnership(pvcNamespacedName) Expect(err).To(HaveOccurred()) Expect(kerrors.IsNotFound(err)).To(BeTrue()) Expect(pvcPreparationComplete).To(BeFalse()) @@ -1791,7 +1805,12 @@ var _ = Describe("VolSync_Handler", func() { var pvcPreparationErr error JustBeforeEach(func() { - pvcPreparationComplete, pvcPreparationErr = vsHandler.TakePVCOwnership(testPVC.GetName()) + pvcNamespacedName := types.NamespacedName{ + Name: testPVC.GetName(), + Namespace: testPVC.GetNamespace(), + } 
+ + pvcPreparationComplete, pvcPreparationErr = vsHandler.TakePVCOwnership(pvcNamespacedName) // In all cases at this point we should expect that the PVC has ownership taken over by our owner VRG Eventually(func() bool { diff --git a/controllers/volumereplicationgroup_controller.go b/controllers/volumereplicationgroup_controller.go index 464374f0f..d9e1d9290 100644 --- a/controllers/volumereplicationgroup_controller.go +++ b/controllers/volumereplicationgroup_controller.go @@ -427,9 +427,11 @@ func (r *VolumeReplicationGroupReconciler) Reconcile(ctx context.Context, req ct } v.ramenConfig = ramenConfig + adminNamespaceVRG := vrgInAdminNamespace(v.instance, v.ramenConfig) + v.volSyncHandler = volsync.NewVSHandler(ctx, r.Client, log, v.instance, v.instance.Spec.Async, cephFSCSIDriverNameOrDefault(v.ramenConfig), - volSyncDestinationCopyMethodOrDefault(v.ramenConfig)) + volSyncDestinationCopyMethodOrDefault(v.ramenConfig), adminNamespaceVRG) if v.instance.Status.ProtectedPVCs == nil { v.instance.Status.ProtectedPVCs = []ramendrv1alpha1.ProtectedPVC{} diff --git a/controllers/vrg_kubeobjects.go b/controllers/vrg_kubeobjects.go index ef748a3af..2241fb8c5 100644 --- a/controllers/vrg_kubeobjects.go +++ b/controllers/vrg_kubeobjects.go @@ -609,7 +609,10 @@ func (v *VRGInstance) kubeObjectsRecoveryStartOrResume( } log1.Error(err, "Kube objects group recover error") - cleanup(request) + + if ok { + cleanup(request) + } result.Requeue = true diff --git a/controllers/vrg_status_pvcs.go b/controllers/vrg_status_pvcs.go index cb08e10e9..1ee53c0e7 100644 --- a/controllers/vrg_status_pvcs.go +++ b/controllers/vrg_status_pvcs.go @@ -50,17 +50,6 @@ func FindProtectedPvcAndIndex( return nil, len(vrg.Status.ProtectedPVCs) } -func (v *VRGInstance) findFirstProtectedPVCWithName(pvcName string) *ramen.ProtectedPVC { - for index := range v.instance.Status.ProtectedPVCs { - protectedPVC := &v.instance.Status.ProtectedPVCs[index] - if protectedPVC.Name == pvcName { - return protectedPVC - } - } - - return nil -} - func (v *VRGInstance) vrgStatusPvcNamespacesSetIfUnset() { vrg := v.instance diff --git a/controllers/vrg_volsync.go b/controllers/vrg_volsync.go index cc2e2c87d..22c65e1e1 100644 --- a/controllers/vrg_volsync.go +++ b/controllers/vrg_volsync.go @@ -10,6 +10,7 @@ import ( "github.com/go-logr/logr" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/controllers/util" "github.com/ramendr/ramen/controllers/volsync" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,7 +34,7 @@ func (v *VRGInstance) restorePVsAndPVCsForVolSync() (int, error) { if err != nil { v.log.Info(fmt.Sprintf("Unable to ensure PVC %v -- err: %v", rdSpec, err)) - protectedPVC := v.findFirstProtectedPVCWithName(rdSpec.ProtectedPVC.Name) + protectedPVC := FindProtectedPVC(v.instance, rdSpec.ProtectedPVC.Namespace, rdSpec.ProtectedPVC.Name) if protectedPVC == nil { protectedPVC = &ramendrv1alpha1.ProtectedPVC{} rdSpec.ProtectedPVC.DeepCopyInto(protectedPVC) @@ -48,7 +49,7 @@ func (v *VRGInstance) restorePVsAndPVCsForVolSync() (int, error) { numPVsRestored++ - protectedPVC := v.findFirstProtectedPVCWithName(rdSpec.ProtectedPVC.Name) + protectedPVC := FindProtectedPVC(v.instance, rdSpec.ProtectedPVC.Namespace, rdSpec.ProtectedPVC.Name) if protectedPVC == nil { protectedPVC = &ramendrv1alpha1.ProtectedPVC{} rdSpec.ProtectedPVC.DeepCopyInto(protectedPVC) @@ -123,7 +124,7 @@ func (v *VRGInstance) reconcilePVCAsVolSyncPrimary(pvc corev1.PersistentVolumeCl Resources: pvc.Spec.Resources, 
} - protectedPVC := v.findFirstProtectedPVCWithName(pvc.Name) + protectedPVC := FindProtectedPVC(v.instance, pvc.Namespace, pvc.Name) if protectedPVC == nil { protectedPVC = newProtectedPVC v.instance.Status.ProtectedPVCs = append(v.instance.Status.ProtectedPVCs, *protectedPVC) @@ -138,7 +139,8 @@ func (v *VRGInstance) reconcilePVCAsVolSyncPrimary(pvc corev1.PersistentVolumeCl ProtectedPVC: *protectedPVC, } - err := v.volSyncHandler.PreparePVC(pvc.Name, v.instance.Spec.PrepareForFinalSync, + err := v.volSyncHandler.PreparePVC(util.ProtectedPVCNamespacedName(*protectedPVC), + v.instance.Spec.PrepareForFinalSync, v.volSyncHandler.IsCopyMethodDirect()) if err != nil { return true @@ -321,7 +323,7 @@ func (v *VRGInstance) buildDataProtectedCondition() *metav1.Condition { } // Check now if we have synced up at least once for this PVC - rsDataProtected, err := v.volSyncHandler.IsRSDataProtected(protectedPVC.Name) + rsDataProtected, err := v.volSyncHandler.IsRSDataProtected(protectedPVC.Name, protectedPVC.Namespace) if err != nil || !rsDataProtected { ready = false diff --git a/controllers/vrg_volsync_test.go b/controllers/vrg_volsync_test.go index c358e4ef6..b9cc20576 100644 --- a/controllers/vrg_volsync_test.go +++ b/controllers/vrg_volsync_test.go @@ -281,6 +281,7 @@ var _ = Describe("VolumeReplicationGroupVolSyncController", func() { { ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: "testingpvc-a", + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &storageClassName, AccessModes: testAccessModes, @@ -291,6 +292,7 @@ var _ = Describe("VolumeReplicationGroupVolSyncController", func() { { ProtectedPVC: ramendrv1alpha1.ProtectedPVC{ Name: "testingpvc-b", + Namespace: testNamespace.GetName(), ProtectedByVolSync: true, StorageClassName: &storageClassName, AccessModes: testAccessModes, diff --git a/docs/user-quick-start.md b/docs/user-quick-start.md index cfd6d818f..79a73797c 100644 --- a/docs/user-quick-start.md +++ b/docs/user-quick-start.md @@ -97,7 +97,7 @@ enough resources: sudo dnf install https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm ``` - Tested with version v1.33.0. + Tested with version v1.33.1. 1. Validate the installation @@ -152,7 +152,7 @@ enough resources: 1. Install the `virtctl` tool. 
``` - curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 + curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/virtctl-v1.2.1-linux-amd64 sudo install virtctl /usr/local/bin rm virtctl ``` diff --git a/e2e/config.yaml b/e2e/config.yaml new file mode 100644 index 000000000..8d41d6138 --- /dev/null +++ b/e2e/config.yaml @@ -0,0 +1,4 @@ +--- +channelname: "ramen-gitops" +channelnamespace: "ramen-samples" +giturl: "https://github.com/RamenDR/ocm-ramen-samples.git" diff --git a/e2e/deployers/applicationset.go b/e2e/deployers/applicationset.go index daa205c2e..9abd3d29a 100644 --- a/e2e/deployers/applicationset.go +++ b/e2e/deployers/applicationset.go @@ -10,8 +10,8 @@ import ( type ApplicationSet struct{} -func (a *ApplicationSet) Init() { -} +// func (a *ApplicationSet) Init() { +// } func (a ApplicationSet) Deploy(w workloads.Workload) error { util.Ctx.Log.Info("enter Deploy " + w.GetName() + "/Appset") diff --git a/e2e/deployers/crud.go b/e2e/deployers/crud.go index 03aa238dd..0e66212a4 100644 --- a/e2e/deployers/crud.go +++ b/e2e/deployers/crud.go @@ -3,34 +3,202 @@ package deployers -func createNamespace() error { +import ( + "context" + "strings" + + "github.com/ramendr/ramen/e2e/util" + "github.com/ramendr/ramen/e2e/workloads" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ocmv1b1 "open-cluster-management.io/api/cluster/v1beta1" + ocmv1b2 "open-cluster-management.io/api/cluster/v1beta2" + placementrulev1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1" + subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" +) + +const ( + AppLabelKey = "app" + ClusterSetName = "default" +) + +func createManagedClusterSetBinding(name, namespace string) error { + labels := make(map[string]string) + labels[AppLabelKey] = namespace + mcsb := &ocmv1b2.ManagedClusterSetBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: ocmv1b2.ManagedClusterSetBindingSpec{ + ClusterSet: ClusterSetName, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), mcsb) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + } + return nil } -func createManagedClusterSetBinding() error { +func deleteManagedClusterSetBinding(name, namespace string) error { + mcsb := &ocmv1b2.ManagedClusterSetBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), mcsb) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("managedClusterSetBinding " + name + " not found") + } + return nil } -func createPlacement() error { +func createPlacement(name, namespace string) error { + labels := make(map[string]string) + labels[AppLabelKey] = name + clusterSet := []string{"default"} + + var numClusters int32 = 1 + placement := &ocmv1b1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Spec: ocmv1b1.PlacementSpec{ + ClusterSets: clusterSet, + NumberOfClusters: &numClusters, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), placement) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("placement " + 
placement.Name + " already Exists") + } + return nil } -func createSubscription() error { +func deletePlacement(name, namespace string) error { + placement := &ocmv1b1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), placement) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("placement " + name + " not found") + } + return nil } -func deleteNamespace() error { +func createSubscription(s Subscription, w workloads.Workload) error { + name := GetCombinedName(s, w) + namespace := name + + labels := make(map[string]string) + labels[AppLabelKey] = name + + annotations := make(map[string]string) + annotations["apps.open-cluster-management.io/github-branch"] = w.GetRevision() + annotations["apps.open-cluster-management.io/github-path"] = w.GetPath() + + placementRef := corev1.ObjectReference{ + Kind: "Placement", + Name: name, + } + + placementRulePlacement := &placementrulev1.Placement{} + placementRulePlacement.PlacementRef = &placementRef + + subscription := &subscriptionv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: subscriptionv1.SubscriptionSpec{ + Channel: util.GetChannelNamespace() + "/" + util.GetChannelName(), + Placement: placementRulePlacement, + }, + } + + err := util.Ctx.Hub.CtrlClient.Create(context.Background(), subscription) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + util.Ctx.Log.Info("placement " + subscription.Name + " already Exists") + } + return nil } -func deleteManagedClusterSetBinding() error { +func deleteSubscription(s Subscription, w workloads.Workload) error { + name := GetCombinedName(s, w) + namespace := name + + subscription := &subscriptionv1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + err := util.Ctx.Hub.CtrlClient.Delete(context.Background(), subscription) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + util.Ctx.Log.Info("subscription " + name + " not found") + } + return nil } -func deletePlacement() error { - return nil +func GetCombinedName(d Deployer, w workloads.Workload) string { + return strings.ToLower(d.GetName() + "-" + w.GetName() + "-" + w.GetAppName()) } -func deleteSubscription() error { - return nil +func getSubscription(ctrlClient client.Client, namespace, name string) (*subscriptionv1.Subscription, error) { + subscription := &subscriptionv1.Subscription{} + key := types.NamespacedName{Name: name, Namespace: namespace} + + err := ctrlClient.Get(context.Background(), key, subscription) + if err != nil { + return nil, err + } + + return subscription, nil } diff --git a/e2e/deployers/deployer.go b/e2e/deployers/deployer.go index 0ad4181b6..5b246ebd7 100644 --- a/e2e/deployers/deployer.go +++ b/e2e/deployers/deployer.go @@ -7,7 +7,6 @@ import "github.com/ramendr/ramen/e2e/workloads" // Deployer interface has methods to deploy a workload to a cluster type Deployer interface { - Init() Deploy(workloads.Workload) error Undeploy(workloads.Workload) error // Scale(Workload) for adding/removing PVCs; in Deployer even though scaling is a Workload interface diff --git a/e2e/deployers/retry.go b/e2e/deployers/retry.go new file mode 100644 index 000000000..8999db124 --- /dev/null +++ b/e2e/deployers/retry.go @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + 
+package deployers + +import ( + "fmt" + "time" + + "github.com/ramendr/ramen/e2e/util" + subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" +) + +const FiveSecondsDuration = 5 * time.Second + +func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.SubscriptionPhase) error { + // sleep to wait for the subscription to be processed + time.Sleep(FiveSecondsDuration) + + startTime := time.Now() + + for { + sub, err := getSubscription(util.Ctx.Hub.CtrlClient, namespace, name) + if err != nil { + return err + } + + currentPhase := sub.Status.Phase + if currentPhase == phase { + util.Ctx.Log.Info(fmt.Sprintf("subscription %s phase is %s", name, phase)) + + return nil + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return fmt.Errorf("subscription %s status is not %s yet before timeout of %v", + name, phase, util.Timeout) + } + + if currentPhase == "" { + currentPhase = "empty" + } + + util.Ctx.Log.Info(fmt.Sprintf("current subscription %s phase is %s, expecting %s, retry in %v seconds", + name, currentPhase, phase, util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} diff --git a/e2e/deployers/subscription.go b/e2e/deployers/subscription.go index f8e27b8fb..916dfcdc9 100644 --- a/e2e/deployers/subscription.go +++ b/e2e/deployers/subscription.go @@ -6,21 +6,13 @@ package deployers import ( "github.com/ramendr/ramen/e2e/util" "github.com/ramendr/ramen/e2e/workloads" + subscriptionv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/v1" ) -type Subscription struct { - // NamePrefix helps when the same workload needs to run in parallel with different deployers. - // In the future we potentially would add a resource suffix that is either randomly generated - // or a hash of the full test name, to handle cases where we want to run the "same" combination - // of deployer+workload for various reasons. 
- NamePrefix string - McsbName string -} +// mcsb name must be same as the target ManagedClusterSet +const McsbName = ClusterSetName -func (s *Subscription) Init() { - s.NamePrefix = "sub-" - s.McsbName = "default" -} +type Subscription struct{} func (s Subscription) GetName() string { return "Subscription" @@ -33,25 +25,33 @@ func (s Subscription) Deploy(w workloads.Workload) error { // Generate a Subscription for the Workload // - Kustomize the Workload; call Workload.Kustomize(StorageType) // Address namespace/label/suffix as needed for various resources - util.Ctx.Log.Info("enter Deploy " + w.GetName() + "/Subscription") + util.Ctx.Log.Info("enter Deploy " + w.GetName() + "/" + s.GetName()) + + name := GetCombinedName(s, w) + namespace := name + + // create subscription namespace + err := util.CreateNamespace(util.Ctx.Hub.CtrlClient, namespace) + if err != nil { + return err + } - // w.Kustomize() - err := createNamespace() + err = createManagedClusterSetBinding(McsbName, namespace) if err != nil { return err } - err = createManagedClusterSetBinding() + err = createPlacement(name, namespace) if err != nil { return err } - err = createPlacement() + err = createSubscription(s, w) if err != nil { return err } - err = createSubscription() + err = waitSubscriptionPhase(namespace, name, subscriptionv1.SubscriptionPropagated) if err != nil { return err } @@ -61,24 +61,27 @@ func (s Subscription) Deploy(w workloads.Workload) error { func (s Subscription) Undeploy(w workloads.Workload) error { // Delete Subscription, Placement, Binding - util.Ctx.Log.Info("enter Undeploy " + w.GetName() + "/Subscription") + util.Ctx.Log.Info("enter Undeploy " + w.GetName() + "/" + s.GetName()) + + name := GetCombinedName(s, w) + namespace := name - err := deleteSubscription() + err := deleteSubscription(s, w) if err != nil { return err } - err = deletePlacement() + err = deletePlacement(name, namespace) if err != nil { return err } - err = deleteManagedClusterSetBinding() + err = deleteManagedClusterSetBinding(McsbName, namespace) if err != nil { return err } - err = deleteNamespace() + err = util.DeleteNamespace(util.Ctx.Hub.CtrlClient, namespace) if err != nil { return err } diff --git a/e2e/dractions/actions.go b/e2e/dractions/actions.go index 73ae41932..f2725f4d3 100644 --- a/e2e/dractions/actions.go +++ b/e2e/dractions/actions.go @@ -17,26 +17,214 @@ const ( FiveSecondsDuration = 5 * time.Second ) +// If AppSet/Subscription, find Placement +// Determine DRPolicy +// Determine preferredCluster +// Determine PVC label selector +// Determine KubeObjectProtection requirements if Imperative (?) 
+// Create DRPC, in desired namespace func EnableProtection(w workloads.Workload, d deployers.Deployer) error { util.Ctx.Log.Info("enter EnableProtection " + w.GetName() + "/" + d.GetName()) - return nil + name := GetCombinedName(d, w) + namespace := name + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + drPolicyName := DefaultDRPolicyName + appname := w.GetAppName() + placementName := name + drpcName := name + + placement, placementDecisionName, err := waitPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get placementdecision " + placementDecisionName) + + placementDecision, err := getPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementDecisionName) + if err != nil { + return err + } + + clusterName := placementDecision.Status.Decisions[0].ClusterName + util.Ctx.Log.Info("placementdecision clusterName: " + clusterName) + + // move update placement annotation after placement has been handled + // otherwise if we first add ocm disable annotation then it might not + // yet be handled by ocm and thus PlacementSatisfied=false + if placement.Annotations == nil { + placement.Annotations = make(map[string]string) + } + + placement.Annotations[OcmSchedulingDisable] = "true" + + util.Ctx.Log.Info("update placement " + placementName + " annotation") + + if err = updatePlacement(util.Ctx.Hub.CtrlClient, placement); err != nil { + return err + } + + util.Ctx.Log.Info("create drpc " + drpcName) + + drpc := generateDRPC(name, namespace, clusterName, drPolicyName, placementName, appname) + if err = createDRPC(util.Ctx.Hub.CtrlClient, drpc); err != nil { + return err + } + + return waitDRPCReady(util.Ctx.Hub.CtrlClient, namespace, drpcName) } +// remove DRPC +// update placement annotation func DisableProtection(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter DisableProtection " + w.GetName() + "/" + d.GetName()) + util.Ctx.Log.Info("enter DRActions DisableProtection") + + name := GetCombinedName(d, w) + namespace := name + placementName := name + drpcName := name + client := util.Ctx.Hub.CtrlClient - return nil + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + + util.Ctx.Log.Info("delete drpc " + drpcName) + + if err := deleteDRPC(client, namespace, drpcName); err != nil { + return err + } + + if err := waitDRPCDeleted(client, namespace, drpcName); err != nil { + return err + } + + util.Ctx.Log.Info("get placement " + placementName) + + placement, err := getPlacement(client, namespace, placementName) + if err != nil { + return err + } + + delete(placement.Annotations, OcmSchedulingDisable) + util.Ctx.Log.Info("updated placement " + placementName + " annotation") + + return updatePlacement(client, placement) } func Failover(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter Failover " + w.GetName() + "/" + d.GetName()) + util.Ctx.Log.Info("enter DRActions Failover") + + name := GetCombinedName(d, w) + namespace := name + + // _, isAppSet := d.(*deployers.ApplicationSet) + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + + drPolicyName := DefaultDRPolicyName + drpcName := name + client := util.Ctx.Hub.CtrlClient + + // here we expect drpc should be ready before failover + if err := waitDRPCReady(client, namespace, drpcName); err != nil { + return err + } - return nil + util.Ctx.Log.Info("get drpc " + drpcName) + + drpc, err := getDRPC(client, namespace, drpcName) + if err != nil { + return err + } + + 
util.Ctx.Log.Info("get drpolicy " + drPolicyName) + + drpolicy, err := getDRPolicy(client, drPolicyName) + if err != nil { + return err + } + + targetCluster, err := getTargetCluster(client, namespace, name, drpolicy) + if err != nil { + return err + } + + util.Ctx.Log.Info("failover to cluster: " + targetCluster) + + drpc.Spec.Action = "Failover" + drpc.Spec.FailoverCluster = targetCluster + + util.Ctx.Log.Info("update drpc " + drpcName) + + if err = updateDRPC(client, drpc); err != nil { + return err + } + + return waitDRPC(client, namespace, name, "FailedOver") } +// Determine DRPC +// Check Placement +// Relocate to Primary in DRPolicy as the PrimaryCluster +// Update DRPC func Relocate(w workloads.Workload, d deployers.Deployer) error { - util.Ctx.Log.Info("enter Relocate " + w.GetName() + "/" + d.GetName()) + util.Ctx.Log.Info("enter DRActions Relocate") + + name := GetCombinedName(d, w) + namespace := name + + // _, isAppSet := d.(*deployers.ApplicationSet) + // if isAppSet { + // namespace = util.ArgocdNamespace + // } + drPolicyName := DefaultDRPolicyName + drpcName := name + client := util.Ctx.Hub.CtrlClient + + // here we expect drpc should be ready before relocate + err := waitDRPCReady(client, namespace, drpcName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get drpc " + drpcName) + + drpc, err := getDRPC(client, namespace, drpcName) + if err != nil { + return err + } + + util.Ctx.Log.Info("get drpolicy " + drPolicyName) + + drpolicy, err := getDRPolicy(client, drPolicyName) + if err != nil { + return err + } + + targetCluster, err := getTargetCluster(client, namespace, name, drpolicy) + if err != nil { + return err + } + + util.Ctx.Log.Info("relocate to cluster: " + targetCluster) + + drpc.Spec.Action = "Relocate" + drpc.Spec.PreferredCluster = targetCluster + + util.Ctx.Log.Info("update drpc " + drpcName) + + err = updateDRPC(client, drpc) + if err != nil { + return err + } + + return waitDRPC(client, namespace, name, "Relocated") +} - return nil +func GetCombinedName(d deployers.Deployer, w workloads.Workload) string { + return deployers.GetCombinedName(d, w) } diff --git a/e2e/dractions/crud.go b/e2e/dractions/crud.go new file mode 100644 index 000000000..5b289a36e --- /dev/null +++ b/e2e/dractions/crud.go @@ -0,0 +1,144 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package dractions + +import ( + "context" + + ramen "github.com/ramendr/ramen/api/v1alpha1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func getPlacement(ctrlClient client.Client, namespace, name string) (*clusterv1beta1.Placement, error) { + placement := &clusterv1beta1.Placement{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, placement) + if err != nil { + return nil, err + } + + return placement, nil +} + +func updatePlacement(ctrlClient client.Client, placement *clusterv1beta1.Placement) error { + err := ctrlClient.Update(context.Background(), placement) + if err != nil { + return err + } + + return nil +} + +func getPlacementDecision(ctrlClient client.Client, namespace, name string) (*clusterv1beta1.PlacementDecision, error) { + placementDecision := &clusterv1beta1.PlacementDecision{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := 
ctrlClient.Get(context.Background(), key, placementDecision) + if err != nil { + return nil, err + } + + return placementDecision, nil +} + +func getDRPC(ctrlClient client.Client, namespace, name string) (*ramen.DRPlacementControl, error) { + drpc := &ramen.DRPlacementControl{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, drpc) + if err != nil { + return nil, err + } + + return drpc, nil +} + +func createDRPC(ctrlClient client.Client, drpc *ramen.DRPlacementControl) error { + err := ctrlClient.Create(context.Background(), drpc) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + // ctx.Log.Info("drpc " + drpc.Name + " already Exists") + } + + return nil +} + +func updateDRPC(ctrlClient client.Client, drpc *ramen.DRPlacementControl) error { + err := ctrlClient.Update(context.Background(), drpc) + if err != nil { + return err + } + + return nil +} + +func deleteDRPC(ctrlClient client.Client, namespace, name string) error { + objDrpc := &ramen.DRPlacementControl{} + key := types.NamespacedName{Namespace: namespace, Name: name} + + err := ctrlClient.Get(context.Background(), key, objDrpc) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + return nil + } + + err = ctrlClient.Delete(context.Background(), objDrpc) + if err != nil { + return err + } + + return nil +} + +func getDRPolicy(ctrlClient client.Client, name string) (*ramen.DRPolicy, error) { + drpolicy := &ramen.DRPolicy{} + key := types.NamespacedName{Name: name} + + err := ctrlClient.Get(context.Background(), key, drpolicy) + if err != nil { + return nil, err + } + + return drpolicy, nil +} + +func generateDRPC(name, namespace, clusterName, drPolicyName, placementName, appname string) *ramen.DRPlacementControl { + drpc := &ramen.DRPlacementControl{ + TypeMeta: metav1.TypeMeta{ + Kind: "DRPlacementControl", + APIVersion: "ramendr.openshift.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{"app": name}, + }, + Spec: ramen.DRPlacementControlSpec{ + PreferredCluster: clusterName, + DRPolicyRef: v1.ObjectReference{ + Name: drPolicyName, + }, + PlacementRef: v1.ObjectReference{ + Kind: "placement", + Name: placementName, + }, + PVCSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{"appname": appname}, + }, + }, + } + + return drpc +} diff --git a/e2e/dractions/retry.go b/e2e/dractions/retry.go new file mode 100644 index 000000000..efd88cc0b --- /dev/null +++ b/e2e/dractions/retry.go @@ -0,0 +1,200 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package dractions + +import ( + "fmt" + "time" + + ramen "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/e2e/util" + "k8s.io/apimachinery/pkg/api/errors" + "open-cluster-management.io/api/cluster/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// return placement object, placementDecisionName, error +func waitPlacementDecision(client client.Client, namespace string, placementName string, +) (*v1beta1.Placement, string, error) { + startTime := time.Now() + placementDecisionName := "" + + for { + placement, err := getPlacement(client, namespace, placementName) + if err != nil { + return nil, "", err + } + + for _, cond := range placement.Status.Conditions { + if cond.Type == "PlacementSatisfied" && cond.Status == "True" { + placementDecisionName = placement.Status.DecisionGroups[0].Decisions[0] + if placementDecisionName != "" { + 
util.Ctx.Log.Info("got placementdecision name " + placementDecisionName) + + return placement, placementDecisionName, nil + } + } + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return nil, "", fmt.Errorf("could not get placement decision before timeout") + } + + util.Ctx.Log.Info(fmt.Sprintf("could not get placement decision, retry in %v seconds", util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} + +func waitDRPCReady(client client.Client, namespace string, drpcName string) error { + startTime := time.Now() + + for { + drpc, err := getDRPC(client, namespace, drpcName) + if err != nil { + return err + } + + conditionReady := checkDRPCConditions(drpc) + if conditionReady && drpc.Status.LastGroupSyncTime != nil { + util.Ctx.Log.Info("drpc " + drpcName + " is ready") + + return nil + } + + if conditionReady && drpc.Status.LastGroupSyncTime == nil { + util.Ctx.Log.Info("drpc " + drpcName + " LastGroupSyncTime is nil") + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return fmt.Errorf(fmt.Sprintf("drpc %s is not ready yet before timeout of %v", drpcName, util.Timeout)) + } + + util.Ctx.Log.Info(fmt.Sprintf("drpc %s is not ready yet, retry in %v seconds", drpcName, util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} + +func checkDRPCConditions(drpc *ramen.DRPlacementControl) bool { + available := false + peerReady := false + + for _, cond := range drpc.Status.Conditions { + if cond.Type == "Available" { + if cond.Status != "True" { + util.Ctx.Log.Info("drpc " + drpc.Name + " condition Available is not True") + + return false + } + + available = true + } + + if cond.Type == "PeerReady" { + if cond.Status != "True" { + util.Ctx.Log.Info("drpc " + drpc.Name + " condition PeerReady is not True") + + return false + } + + peerReady = true + } + } + + return available && peerReady +} + +func waitDRPCPhase(client client.Client, namespace string, name string, phase string) error { + startTime := time.Now() + + for { + drpc, err := getDRPC(client, namespace, name) + if err != nil { + return err + } + + currentPhase := string(drpc.Status.Phase) + if currentPhase == phase { + util.Ctx.Log.Info("drpc " + name + " phase is " + phase) + + return nil + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return fmt.Errorf(fmt.Sprintf("drpc %s status is not %s yet before timeout of %v", name, phase, util.Timeout)) + } + + util.Ctx.Log.Info(fmt.Sprintf("current drpc %s phase is %s, expecting %s, retry in %v seconds", + name, currentPhase, phase, util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} + +func getCurrentCluster(client client.Client, namespace string, placementName string) (string, error) { + _, placementDecisionName, err := waitPlacementDecision(client, namespace, placementName) + if err != nil { + return "", err + } + + placementDecision, err := getPlacementDecision(client, namespace, placementDecisionName) + if err != nil { + return "", err + } + + clusterName := placementDecision.Status.Decisions[0].ClusterName + util.Ctx.Log.Info("placementdecision clusterName: " + clusterName) + + return clusterName, nil +} + +func getTargetCluster(client client.Client, namespace, placementName string, drpolicy *ramen.DRPolicy) (string, error) { + currentCluster, err := getCurrentCluster(client, namespace, placementName) + if err != nil { + return "", err + } + + targetCluster := "" + if currentCluster == 
drpolicy.Spec.DRClusters[0] { + targetCluster = drpolicy.Spec.DRClusters[1] + } else { + targetCluster = drpolicy.Spec.DRClusters[0] + } + + return targetCluster, nil +} + +// first wait for the DRPC to reach the expected phase, then check DRPC conditions +func waitDRPC(client client.Client, namespace, name, expectedPhase string) error { + // sleep to allow the DRPC to be processed + time.Sleep(FiveSecondsDuration) + // check Phase + if err := waitDRPCPhase(client, namespace, name, expectedPhase); err != nil { + return err + } + // then check Conditions + return waitDRPCReady(client, namespace, name) +} + +func waitDRPCDeleted(client client.Client, namespace string, name string) error { + startTime := time.Now() + + for { + _, err := getDRPC(client, namespace, name) + if err != nil { + if errors.IsNotFound(err) { + util.Ctx.Log.Info("drpc " + name + " is deleted") + + return nil + } + + util.Ctx.Log.Info(fmt.Sprintf("failed to get drpc %s: %v", name, err)) + } + + if time.Since(startTime) > time.Second*time.Duration(util.Timeout) { + return fmt.Errorf("drpc %s is not deleted yet before timeout of %v", name, util.Timeout) + } + + util.Ctx.Log.Info(fmt.Sprintf("drpc %s is not deleted yet, retry in %v seconds", name, util.TimeInterval)) + time.Sleep(time.Second * time.Duration(util.TimeInterval)) + } +} diff --git a/e2e/rdr-e2e.sh b/e2e/e2e-rdr.sh similarity index 73% rename from e2e/rdr-e2e.sh rename to e2e/e2e-rdr.sh index e9f6c0ef6..862da3528 100755 --- a/e2e/rdr-e2e.sh +++ b/e2e/e2e-rdr.sh @@ -5,7 +5,4 @@ echo "Running tests..." -cd ./e2e/ || exit 1 -go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -v - -exit 0 \ No newline at end of file +go test -kubeconfig-c1 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr1 -kubeconfig-c2 ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-dr2 -kubeconfig-hub ~/.config/drenv/rdr-rdr/kubeconfigs/rdr-hub -timeout 0 -v "$@" diff --git a/e2e/exhaustive_suite_test.go b/e2e/exhaustive_suite_test.go index 6a92cf4aa..8c2763d9a 100644 --- a/e2e/exhaustive_suite_test.go +++ b/e2e/exhaustive_suite_test.go @@ -15,22 +15,25 @@ import ( // Workloads = {"Deployment", "STS", "DaemonSet"} // Classes = {"rbd", "cephfs"} -func Exhaustive(t *testing.T) { - t.Helper() - t.Parallel() +var deployment = &workloads.Deployment{ + Path: "workloads/deployment/k8s-regional-rbd", + Revision: "main", + AppName: "busybox", + Name: "Deployment", +} - deployment := &workloads.Deployment{} - deployment.Init() +var Workloads = []workloads.Workload{deployment} - Workloads := []workloads.Workload{deployment} +var subscription = &deployers.Subscription{} - subscription := &deployers.Subscription{} - subscription.Init() +// appset := &deployers.ApplicationSet{} +// Deployers := []deployers.Deployer{subscription, appset} - appset := &deployers.ApplicationSet{} - appset.Init() +var Deployers = []deployers.Deployer{subscription} - Deployers := []deployers.Deployer{subscription, appset} +func Exhaustive(t *testing.T) { + t.Helper() + t.Parallel() for _, workload := range Workloads { for _, deployer := range Deployers { diff --git a/e2e/go.mod b/e2e/go.mod index 552fc84f9..6b1efbea2 100644 --- a/e2e/go.mod +++ b/e2e/go.mod @@ -5,19 +5,24 @@ go 1.21 require ( github.com/go-logr/logr v1.4.1 github.com/ramendr/ramen/api v0.0.0-00010101000000-000000000000 + github.com/spf13/viper v1.18.2 go.uber.org/zap v1.27.0 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.29.3 k8s.io/apimachinery 
v0.29.3 k8s.io/client-go v12.0.0+incompatible k8s.io/kubectl v0.29.3 open-cluster-management.io/api v0.13.0 + open-cluster-management.io/multicloud-operators-channel v0.10.1-0.20230316173315-10f48e51f3aa open-cluster-management.io/multicloud-operators-subscription v0.13.0 sigs.k8s.io/controller-runtime v0.17.3 ) require ( - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -28,34 +33,43 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.5.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.20.0 // indirect - golang.org/x/oauth2 v0.13.0 // indirect + golang.org/x/oauth2 v0.15.0 // indirect golang.org/x/sys v0.16.0 // indirect golang.org/x/term v0.16.0 // indirect golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.29.3 // indirect k8s.io/component-base v0.29.3 // indirect k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - open-cluster-management.io/multicloud-operators-channel v0.10.1-0.20230316173315-10f48e51f3aa // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/e2e/go.sum b/e2e/go.sum index 14278a39d..783d7dd16 100644 --- a/e2e/go.sum +++ b/e2e/go.sum @@ -4,14 +4,17 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -50,6 +53,8 @@ github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3 h1:2XF1Vzq06X+inNqgJ9 github.com/google/pprof v0.0.0-20230510103437-eeec1cb781c3/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk= github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -65,11 +70,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -82,10 +91,13 @@ github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw= github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= @@ -96,8 +108,20 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0 h1:qL6eeBtdjLq7ktBBg8tB44b6jTKQjFy6bdl8EM+Kq6o= 
github.com/stolostron/multicloud-operators-placementrule v1.2.4-1-20220311-8eedb3f.0.20230828200208-cd3c119a7fa0/go.mod h1:uMTaz9cMLe5N+yJ/PpHPtSOdlBFB00WdxAW+K5TfkVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -109,6 +133,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -122,8 +148,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= @@ -135,8 +161,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= -golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= +golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -160,8 +186,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.3.0 
h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -186,6 +212,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= diff --git a/e2e/main_test.go b/e2e/main_test.go index 41296a892..1a0540e24 100644 --- a/e2e/main_test.go +++ b/e2e/main_test.go @@ -40,6 +40,10 @@ func TestMain(m *testing.M) { panic(err) } + if err := util.ReadConfig(); err != nil { + panic(err) + } + os.Exit(m.Run()) } @@ -49,14 +53,27 @@ type testDef struct { } var Suites = []testDef{ - {"Validate", Validate}, {"Exhaustive", Exhaustive}, } func TestSuites(t *testing.T) { util.Ctx.Log.Info(t.Name()) + if err := util.EnsureChannel(); err != nil { + t.Fatalf("failed to ensure channel: %v", err) + } + + if !t.Run("Validate", Validate) { + t.Fatal("failed to validate the test suite") + } + for _, suite := range Suites { t.Run(suite.name, suite.test) } + + t.Cleanup(func() { + if err := util.EnsureChannelDeleted(); err != nil { + t.Fatalf("failed to ensure channel deleted: %v", err) + } + }) } diff --git a/e2e/util/config.go b/e2e/util/config.go new file mode 100644 index 000000000..c31c03e44 --- /dev/null +++ b/e2e/util/config.go @@ -0,0 +1,60 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "fmt" + + "github.com/spf13/viper" +) + +type TestConfig struct { + ChannelName string + ChannelNamespace string + GitURL string +} + +var config = &TestConfig{} + +func ReadConfig() error { + viper.SetDefault("ChannelName", defaultChannelName) + viper.SetDefault("ChannelNamespace", defaultChannelNamespace) + viper.SetDefault("GitURL", defaultGitURL) + + if err := viper.BindEnv("ChannelName", "ChannelName"); err != nil { + return (err) + } + + if err := viper.BindEnv("ChannelNamespace", "ChannelNamespace"); err != nil { + return (err) + } + + if err := viper.BindEnv("GitURL", "GitURL"); err != nil { + return (err) + } + + viper.SetConfigFile("config.yaml") + + if err := viper.ReadInConfig(); err != nil { + return fmt.Errorf("failed to read config: %v", err) + } + + if err := viper.Unmarshal(config); err != nil { + return fmt.Errorf("failed to unmarshal config: %v", err) + } + + return nil +} + +func GetChannelName() string { + return config.ChannelName +} + +func GetChannelNamespace() string { + return config.ChannelNamespace +} + +func GetGitURL() string { 
+ return config.GitURL +} diff --git a/e2e/util/const.go b/e2e/util/const.go index d53b1e040..37cc00871 100644 --- a/e2e/util/const.go +++ b/e2e/util/const.go @@ -5,6 +5,11 @@ package util const ( RamenSystemNamespace = "ramen-system" - ChannelName = "ramen-gitops" - ChannelNamespace = "ramen-samples" + + Timeout = 600 // seconds + TimeInterval = 30 // seconds + + defaultChannelName = "ramen-gitops" + defaultChannelNamespace = "ramen-samples" + defaultGitURL = "https://github.com/RamenDR/ocm-ramen-samples.git" ) diff --git a/e2e/util/context.go b/e2e/util/context.go index a95bc5a48..a8e449638 100644 --- a/e2e/util/context.go +++ b/e2e/util/context.go @@ -15,9 +15,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" // Placement - ocmclusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + ocmv1b1 "open-cluster-management.io/api/cluster/v1beta1" // ManagedClusterSetBinding - ocmclusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" + ocmv1b2 "open-cluster-management.io/api/cluster/v1beta2" ramen "github.com/ramendr/ramen/api/v1alpha1" subscription "open-cluster-management.io/multicloud-operators-subscription/pkg/apis" @@ -45,11 +45,11 @@ type Context struct { var Ctx *Context func addToScheme(scheme *runtime.Scheme) error { - if err := ocmclusterv1beta1.AddToScheme(scheme); err != nil { + if err := ocmv1b1.AddToScheme(scheme); err != nil { return err } - if err := ocmclusterv1beta2.AddToScheme(scheme); err != nil { + if err := ocmv1b2.AddToScheme(scheme); err != nil { return err } diff --git a/e2e/util/crud.go b/e2e/util/crud.go new file mode 100644 index 000000000..650bce856 --- /dev/null +++ b/e2e/util/crud.go @@ -0,0 +1,117 @@ +// SPDX-FileCopyrightText: The RamenDR authors +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + channelv1 "open-cluster-management.io/multicloud-operators-channel/pkg/apis/apps/v1" +) + +func CreateNamespace(client client.Client, namespace string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + err := client.Create(context.Background(), ns) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + } + + return nil +} + +func DeleteNamespace(client client.Client, namespace string) error { + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + + err := client.Delete(context.Background(), ns) + if err != nil { + if !errors.IsNotFound(err) { + return err + } + + Ctx.Log.Info("namespace " + namespace + " not found") + } + + return nil +} + +func createChannel() error { + objChannel := &channelv1.Channel{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetChannelName(), + Namespace: GetChannelNamespace(), + }, + Spec: channelv1.ChannelSpec{ + Pathname: GetGitURL(), + Type: channelv1.ChannelTypeGitHub, + }, + } + + err := Ctx.Hub.CtrlClient.Create(context.Background(), objChannel) + if err != nil { + if !errors.IsAlreadyExists(err) { + return err + } + + Ctx.Log.Info("channel " + GetChannelName() + " already exists") + } else { + Ctx.Log.Info("channel " + GetChannelName() + " is created") + } + + return nil +} + +func deleteChannel() error { + channel := &channelv1.Channel{ + ObjectMeta: metav1.ObjectMeta{ + Name: GetChannelName(), + Namespace: GetChannelNamespace(), + }, + } + + err := Ctx.Hub.CtrlClient.Delete(context.Background(), channel) + if err != nil 
{ + if !errors.IsNotFound(err) { + return err + } + + Ctx.Log.Info("channel " + GetChannelName() + " not found") + } else { + Ctx.Log.Info("channel " + GetChannelName() + " is deleted") + } + + return nil +} + +func EnsureChannel() error { + // create channel namespace + err := CreateNamespace(Ctx.Hub.CtrlClient, GetChannelNamespace()) + if err != nil { + return err + } + + return createChannel() +} + +func EnsureChannelDeleted() error { + if err := deleteChannel(); err != nil { + return err + } + + return DeleteNamespace(Ctx.Hub.CtrlClient, GetChannelNamespace()) +} diff --git a/e2e/util/validation.go b/e2e/util/validation.go index 11b4f5e33..3c9be576b 100644 --- a/e2e/util/validation.go +++ b/e2e/util/validation.go @@ -24,6 +24,18 @@ func CheckRamenHubPodRunningStatus(k8sClient *kubernetes.Clientset) (bool, strin return CheckPodRunningStatus(k8sClient, ramenNameSpace, labelSelector, podIdentifier) } +func CheckRamenSpokePodRunningStatus(k8sClient *kubernetes.Clientset) (bool, string, error) { + labelSelector := "app=ramen-dr-cluster" + podIdentifier := "ramen-dr-cluster-operator" + + ramenNameSpace, err := GetRamenNameSpace(k8sClient) + if err != nil { + return false, "", err + } + + return CheckPodRunningStatus(k8sClient, ramenNameSpace, labelSelector, podIdentifier) +} + func GetRamenNameSpace(k8sClient *kubernetes.Clientset) (string, error) { isOpenShift, err := IsOpenShiftCluster(k8sClient) if err != nil { diff --git a/e2e/validation_suite_test.go b/e2e/validation_suite_test.go index 94bad7adc..4ca531e4e 100644 --- a/e2e/validation_suite_test.go +++ b/e2e/validation_suite_test.go @@ -5,15 +5,59 @@ package e2e_test import ( "testing" + + "github.com/ramendr/ramen/e2e/util" ) func Validate(t *testing.T) { t.Helper() if !t.Run("CheckRamenHubOperatorStatus", CheckRamenHubOperatorStatus) { - t.Error() + t.Error("CheckRamenHubOperatorStatus failed") + } + + if !t.Run("CheckRamenSpokeOperatorStatus", CheckRamenSpokeOperatorStatus) { + t.Error("CheckRamenHubOperatorStatus failed") } } func CheckRamenHubOperatorStatus(t *testing.T) { + util.Ctx.Log.Info("enter CheckRamenHubOperatorStatus") + + isRunning, podName, err := util.CheckRamenHubPodRunningStatus(util.Ctx.Hub.K8sClientSet) + if err != nil { + t.Error(err) + } + + if isRunning { + util.Ctx.Log.Info("Ramen Hub Operator is running", "pod", podName) + } else { + t.Error("no running Ramen Hub Operator pod found") + } +} + +func CheckRamenSpokeOperatorStatus(t *testing.T) { + util.Ctx.Log.Info("enter CheckRamenSpokeOperatorStatus") + + isRunning, podName, err := util.CheckRamenSpokePodRunningStatus(util.Ctx.C1.K8sClientSet) + if err != nil { + t.Error(err) + } + + if isRunning { + util.Ctx.Log.Info("Ramen Spoke Operator is running on cluster 1", "pod", podName) + } else { + t.Error("no running Ramen Spoke Operator pod on cluster 1") + } + + isRunning, podName, err = util.CheckRamenSpokePodRunningStatus(util.Ctx.C2.K8sClientSet) + if err != nil { + t.Error(err) + } + + if isRunning { + util.Ctx.Log.Info("Ramen Spoke Operator is running on cluster 2", "pod", podName) + } else { + t.Error("no running Ramen Spoke Operator pod on cluster 2") + } } diff --git a/e2e/workloads/deployment.go b/e2e/workloads/deployment.go index 92706a0ef..6f1a81238 100644 --- a/e2e/workloads/deployment.go +++ b/e2e/workloads/deployment.go @@ -4,17 +4,11 @@ package workloads type Deployment struct { - RepoURL string + // RepoURL string Path string Revision string AppName string -} - -func (w *Deployment) Init() { - w.RepoURL = 
"https://github.com/ramendr/ocm-ramen-samples.git" - w.Path = "workloads/deployment/k8s-regional-rbd" - w.Revision = "main" - w.AppName = "busybox" + Name string } func (w Deployment) GetAppName() string { @@ -22,12 +16,12 @@ func (w Deployment) GetAppName() string { } func (w Deployment) GetName() string { - return "Deployment" + return w.Name } -func (w Deployment) GetRepoURL() string { - return w.RepoURL -} +// func (w Deployment) GetRepoURL() string { +// return w.RepoURL +// } func (w Deployment) GetPath() string { return w.Path diff --git a/e2e/workloads/workload.go b/e2e/workloads/workload.go index c74f19f64..5a4f8dcc2 100644 --- a/e2e/workloads/workload.go +++ b/e2e/workloads/workload.go @@ -8,11 +8,9 @@ type Workload interface { // GetResources() error // Get the actual workload resources GetName() string - // GetAppName() string + GetAppName() string // GetRepoURL() string // Possibly all this is part of Workload than each implementation of the interfaces? - // GetPath() string - // GetRevision() string - - Init() + GetPath() string + GetRevision() string } diff --git a/test/README.md b/test/README.md index 2925db2bd..f6a2ab06d 100644 --- a/test/README.md +++ b/test/README.md @@ -27,8 +27,7 @@ environment. sudo dnf install https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm ``` - You need `minikube` version supporting the `--extra-disks` option. - Tested with version v1.33.0. + Tested with version v1.33.1. 1. Install the `kubectl` tool. See [Install and Set Up kubectl on Linux](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) @@ -61,7 +60,7 @@ environment. 1. Install the `virtctl` tool ``` - curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 + curl -L -o virtctl https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/virtctl-v1.2.1-linux-amd64 sudo install virtctl /usr/local/bin rm virtctl ``` @@ -652,6 +651,7 @@ simpler and faster to work with a minimal environment. - `kubevirt.yaml` - for testing kubevirt and cdi addons - `minio.yaml` - for testing `minio` deployment - `ocm.yaml` - for testing `ocm` deployment +- `olm.yaml` - for testing `olm` deployment - `rook.yaml` - for testing `rook` deployment - `submariner.yaml` - for testing `submariner` deployment - `velero.yaml` - for testing `velero` deployment diff --git a/test/addons/cdi/cr/kustomization.yaml b/test/addons/cdi/cr/kustomization.yaml index de94a7bac..7353c111f 100644 --- a/test/addons/cdi/cr/kustomization.yaml +++ b/test/addons/cdi/cr/kustomization.yaml @@ -4,7 +4,7 @@ # yamllint disable rule:line-length --- resources: - - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.58.3/cdi-cr.yaml + - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.59.0/cdi-cr.yaml patches: # Allow pulling from local insecure registry. 
- target: diff --git a/test/addons/cdi/fetch b/test/addons/cdi/fetch index 59036749b..cc4ecae15 100755 --- a/test/addons/cdi/fetch +++ b/test/addons/cdi/fetch @@ -9,8 +9,8 @@ from drenv import cache os.chdir(os.path.dirname(__file__)) -path = cache.path("addons/cdi-operator.yaml") +path = cache.path("addons/cdi-operator-1.59.0.yaml") cache.fetch("operator", path) -path = cache.path("addons/cdi-cr.yaml") +path = cache.path("addons/cdi-cr-1.59.0.yaml") cache.fetch("cr", path) diff --git a/test/addons/cdi/operator/kustomization.yaml b/test/addons/cdi/operator/kustomization.yaml index 2bc536aff..2ba25dc0b 100644 --- a/test/addons/cdi/operator/kustomization.yaml +++ b/test/addons/cdi/operator/kustomization.yaml @@ -4,4 +4,4 @@ # yamllint disable rule:line-length --- resources: - - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.58.3/cdi-operator.yaml + - https://github.com/kubevirt/containerized-data-importer/releases/download/v1.59.0/cdi-operator.yaml diff --git a/test/addons/cdi/start b/test/addons/cdi/start index 9407936bd..6c4f63443 100755 --- a/test/addons/cdi/start +++ b/test/addons/cdi/start @@ -14,7 +14,7 @@ NAMESPACE = "cdi" def deploy(cluster): print("Deploying cdi operator") - path = cache.path("addons/cdi-operator.yaml") + path = cache.path("addons/cdi-operator-1.59.0.yaml") cache.fetch("operator", path) kubectl.apply("--filename", path, context=cluster) @@ -28,7 +28,7 @@ def deploy(cluster): ) print("Deploying cdi cr") - path = cache.path("addons/cdi-cr.yaml") + path = cache.path("addons/cdi-cr-1.59.0.yaml") cache.fetch("cr", path) kubectl.apply("--filename", path, context=cluster) diff --git a/test/addons/kubevirt/cr/kustomization.yaml b/test/addons/kubevirt/cr/kustomization.yaml index c1817be8f..c7af8c96e 100644 --- a/test/addons/kubevirt/cr/kustomization.yaml +++ b/test/addons/kubevirt/cr/kustomization.yaml @@ -4,7 +4,7 @@ # yamllint disable rule:line-length --- resources: - - https://raw.githubusercontent.com/nirs/drenv-addons/main/kubevirt/9c3a52d67c6782cd48ae0702df5493f5023a04cc/kubevirt-cr.yaml + - https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/kubevirt-cr.yaml patches: # Incrase certificate duration to avoid certificates renewals while a cluster # is suspended and resumed. @@ -23,3 +23,15 @@ patches: duration: 168h server: duration: 168h + # Use single replica to minimize resource usage. + - target: + kind: KubeVirt + name: kubevirt + patch: |- + apiVersion: kubevirt.io/v1 + kind: Kubevirt + metadata: + name: not-used + spec: + infra: + replicas: 1 diff --git a/test/addons/kubevirt/fetch b/test/addons/kubevirt/fetch index a7fd5cba5..4fe101c4f 100755 --- a/test/addons/kubevirt/fetch +++ b/test/addons/kubevirt/fetch @@ -9,8 +9,8 @@ from drenv import cache os.chdir(os.path.dirname(__file__)) -path = cache.path("addons/kubevirt-operator.yaml") +path = cache.path("addons/kubevirt-operator-1.2.1.yaml") cache.fetch("operator", path) -path = cache.path("addons/kubevirt-cr.yaml") +path = cache.path("addons/kubevirt-cr-1.2.1.yaml") cache.fetch("cr", path) diff --git a/test/addons/kubevirt/operator/kustomization.yaml b/test/addons/kubevirt/operator/kustomization.yaml index 43483f12a..ba14faf3e 100644 --- a/test/addons/kubevirt/operator/kustomization.yaml +++ b/test/addons/kubevirt/operator/kustomization.yaml @@ -4,5 +4,13 @@ # yamllint disable rule:line-length --- resources: - # TODO: use latest release. 
- - https://raw.githubusercontent.com/nirs/drenv-addons/main/kubevirt/9c3a52d67c6782cd48ae0702df5493f5023a04cc/kubevirt-operator.yaml + - https://github.com/kubevirt/kubevirt/releases/download/v1.2.1/kubevirt-operator.yaml +patches: + # Use single replica to minimize resource usage. + - target: + kind: Deployment + name: virt-operator + patch: |- + - op: replace + path: /spec/replicas + value: 1 diff --git a/test/addons/kubevirt/start b/test/addons/kubevirt/start index 981df4585..5346ef1d0 100755 --- a/test/addons/kubevirt/start +++ b/test/addons/kubevirt/start @@ -14,7 +14,7 @@ NAMESPACE = "kubevirt" def deploy(cluster): print("Deploying kubevirt operator") - path = cache.path("addons/kubevirt-operator.yaml") + path = cache.path("addons/kubevirt-operator-1.2.1.yaml") cache.fetch("operator", path) kubectl.apply("--filename", path, context=cluster) @@ -28,7 +28,7 @@ def deploy(cluster): ) print("Deploying kubevirt cr") - path = cache.path("addons/kubevirt-cr.yaml") + path = cache.path("addons/kubevirt-cr-1.2.1.yaml") cache.fetch("cr", path) kubectl.apply("--filename", path, context=cluster) diff --git a/test/addons/olm/crds/kustomization.yaml b/test/addons/olm/crds/kustomization.yaml new file mode 100644 index 000000000..a70db1faa --- /dev/null +++ b/test/addons/olm/crds/kustomization.yaml @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +resources: + - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/crds.yaml diff --git a/test/addons/olm/fetch b/test/addons/olm/fetch new file mode 100755 index 000000000..b67c38238 --- /dev/null +++ b/test/addons/olm/fetch @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 + +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +import os + +from drenv import cache + +os.chdir(os.path.dirname(__file__)) +path = cache.path("addons/olm-crds.yaml") +cache.fetch("crds", path) + +path = cache.path("addons/olm-operators.yaml") +cache.fetch("operators", path) diff --git a/test/addons/olm/operators/kustomization.yaml b/test/addons/olm/operators/kustomization.yaml new file mode 100644 index 000000000..ebe0b7b87 --- /dev/null +++ b/test/addons/olm/operators/kustomization.yaml @@ -0,0 +1,6 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +--- +resources: + - https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.22.0/olm.yaml diff --git a/test/addons/olm/start b/test/addons/olm/start index 11d6e7e0e..d7e5fb3e6 100755 --- a/test/addons/olm/start +++ b/test/addons/olm/start @@ -8,9 +8,8 @@ import sys import drenv from drenv import kubectl +from drenv import cache -VERSION = "v0.22.0" -BASE_URL = f"https://github.com/operator-framework/operator-lifecycle-manager/releases/download/{VERSION}" NAMESPACE = "olm" @@ -21,21 +20,22 @@ def deploy(cluster): # The CustomResourceDefinition "clusterserviceversions.operators.coreos.com" # is invalid: metadata.annotations: Too long: must have at most 262144 bytes # See https://medium.com/pareture/kubectl-install-crd-failed-annotations-too-long-2ebc91b40c7d - kubectl.apply( - f"--filename={BASE_URL}/crds.yaml", - "--server-side=true", - context=cluster, - ) + path = cache.path("addons/olm-crds.yaml") + cache.fetch("crds", path) + kubectl.apply("--filename", path, "--server-side=true", context=cluster) print("Waiting until cdrs are established") kubectl.wait( "--for=condition=established", - f"--filename={BASE_URL}/crds.yaml", + 
"--filename", + path, context=cluster, ) print("Deploying olm") - kubectl.apply("--filename", f"{BASE_URL}/olm.yaml", context=cluster) + path = cache.path("addons/olm-operators.yaml") + cache.fetch("operators", path) + kubectl.apply("--filename", path, context=cluster) def wait(cluster): diff --git a/test/drenv/minikube.py b/test/drenv/minikube.py index e7ac6349d..ad94c4afc 100644 --- a/test/drenv/minikube.py +++ b/test/drenv/minikube.py @@ -2,9 +2,12 @@ # SPDX-License-Identifier: Apache-2.0 import errno +import json import logging import os +from packaging.version import Version + from . import commands EXTRA_CONFIG = [ @@ -113,8 +116,10 @@ def setup_files(): To load the configuration you must call load_files() after a cluster is created. """ - _setup_sysctl() - _setup_systemd_resolved() + version = _version() + logging.debug("[minikube] Using minikube version %s", version) + _setup_sysctl(version) + _setup_systemd_resolved(version) def load_files(profile): @@ -137,7 +142,17 @@ def cleanup_files(): _cleanup_file(_sysctl_drenv_conf()) -def _setup_sysctl(): +def _version(): + """ + Get minikube version string ("v1.33.1") and return a package.version.Version + instance. + """ + out = _run("version", output="json") + info = json.loads(out) + return Version(info["minikubeVersion"]) + + +def _setup_sysctl(version): """ Increase fs.inotify limits to avoid random timeouts when starting kubevirt VM. @@ -145,6 +160,10 @@ def _setup_sysctl(): We use the same configuration as OpenShift worker node. See also https://www.suse.com/support/kb/doc/?id=000020048 """ + if version >= Version("v1.33.1"): + logging.debug("[minikube] Skipping sysctl configuration") + return + path = _sysctl_drenv_conf() data = """# Added by drenv setup fs.inotify.max_user_instances = 8192 @@ -165,7 +184,7 @@ def _sysctl_drenv_conf(): return _minikube_file("etc", "sysctl.d", "99-drenv.conf") -def _setup_systemd_resolved(): +def _setup_systemd_resolved(version): """ Disable DNSSEC in systemd-resolved configuration. @@ -174,6 +193,10 @@ def _setup_systemd_resolved(): TODO: Remove when issue is fixed in minikube. """ + if version >= Version("v1.33.1"): + logging.debug("[minikube] Skipping systemd-resolved configuration") + return + path = _systemd_resolved_drenv_conf() data = """# Added by drenv setup [Resolve] diff --git a/test/envs/olm.yaml b/test/envs/olm.yaml new file mode 100644 index 000000000..fd3bc62c7 --- /dev/null +++ b/test/envs/olm.yaml @@ -0,0 +1,15 @@ +# SPDX-FileCopyrightText: The RamenDR authors +# SPDX-License-Identifier: Apache-2.0 + +# Environment for testing olm deployment. +--- +name: olm + +profiles: + - name: c1 + driver: $vm + container_runtime: containerd + memory: 2g + workers: + - addons: + - name: olm diff --git a/test/setup.py b/test/setup.py index f409f3431..ae122d6d7 100644 --- a/test/setup.py +++ b/test/setup.py @@ -21,6 +21,7 @@ install_requires=[ "PyYAML", "toml", + "packaging", ], classifiers=[ "Development Status :: 4 - Beta",