diff --git a/test/framework/machinepool_helpers.go b/test/framework/machinepool_helpers.go
index 7aaf40fcefd5..760b942343d7 100644
--- a/test/framework/machinepool_helpers.go
+++ b/test/framework/machinepool_helpers.go
@@ -232,20 +232,22 @@ func ScaleMachinePoolTopologyAndWait(ctx context.Context, input ScaleMachinePool
 		return patchHelper.Patch(ctx, input.Cluster)
 	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine pool topology %s", mpTopology.Name)
 
-	log.Logf("Waiting for correct number of replicas to exist")
+	log.Logf("Waiting for correct number of replicas to exist and have correct number for .spec.replicas")
 	mpList := &expv1.MachinePoolList{}
-	Eventually(func() error {
-		return input.ClusterProxy.GetClient().List(ctx, mpList,
+	mp := expv1.MachinePool{}
+	Eventually(func(g Gomega) int32 {
+		g.Expect(input.ClusterProxy.GetClient().List(ctx, mpList,
 			client.InNamespace(input.Cluster.Namespace),
 			client.MatchingLabels{
 				clusterv1.ClusterNameLabel:                    input.Cluster.Name,
 				clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
 			},
-		)
-	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachinePools object for Cluster %s", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
+		)).ToNot(HaveOccurred())
+		g.Expect(mpList.Items).To(HaveLen(1))
+		mp = mpList.Items[0]
+		return *mp.Spec.Replicas
+	}, retryableOperationTimeout, retryableOperationInterval).Should(Equal(input.Replicas), "MachinePool replicas for Cluster %s does not match set topology replicas", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
 
-	Expect(mpList.Items).To(HaveLen(1))
-	mp := mpList.Items[0]
 	WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
 		Getter:      input.Getter,
 		MachinePool: &mp,
diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
index f9b91e042513..dc1db0eca69e 100644
--- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
+++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
@@ -378,7 +378,7 @@ func dockerMachineToDockerMachinePool(_ context.Context, o client.Object) []ctrl
 }
 
 // updateStatus updates the Status field for the MachinePool object.
-// It checks for the current state of the replicas and updates the Status of the MachineSet.
+// It checks for the current state of the replicas and updates the Status of the MachinePool.
 func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool, dockerMachines []infrav1.DockerMachine) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 
@@ -405,10 +405,10 @@ func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster
 	switch {
 	// We are scaling up
 	case readyReplicaCount < desiredReplicas:
-		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachineSet to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
 	// We are scaling down
 	case readyReplicaCount > desiredReplicas:
-		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachineSet to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
 	default:
 		// Make sure last resize operation is marked as completed.
 		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines