Merge pull request #10952 from chrischdi/pr-machinepool-flake-fix
🌱 test: fix machinepool test to wait for topology controller to set correct number of replicas first
k8s-ci-robot authored Jul 29, 2024
2 parents 608c403 + 5505c78 commit eba0375
Showing 2 changed files with 12 additions and 10 deletions.
16 changes: 9 additions & 7 deletions test/framework/machinepool_helpers.go
@@ -232,20 +232,22 @@ func ScaleMachinePoolTopologyAndWait(ctx context.Context, input ScaleMachinePool
 		return patchHelper.Patch(ctx, input.Cluster)
 	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to scale machine pool topology %s", mpTopology.Name)
 
-	log.Logf("Waiting for correct number of replicas to exist")
+	log.Logf("Waiting for correct number of replicas to exist and have correct number for .spec.replicas")
 	mpList := &expv1.MachinePoolList{}
-	Eventually(func() error {
-		return input.ClusterProxy.GetClient().List(ctx, mpList,
+	mp := expv1.MachinePool{}
+	Eventually(func(g Gomega) int32 {
+		g.Expect(input.ClusterProxy.GetClient().List(ctx, mpList,
 			client.InNamespace(input.Cluster.Namespace),
 			client.MatchingLabels{
 				clusterv1.ClusterNameLabel:                    input.Cluster.Name,
 				clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
 			},
-		)
-	}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list MachinePools object for Cluster %s", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
+		)).ToNot(HaveOccurred())
+		g.Expect(mpList.Items).To(HaveLen(1))
+		mp = mpList.Items[0]
+		return *mp.Spec.Replicas
+	}, retryableOperationTimeout, retryableOperationInterval).Should(Equal(input.Replicas), "MachinePool replicas for Cluster %s does not match set topology replicas", klog.KRef(input.Cluster.Namespace, input.Cluster.Name))
 
-	Expect(mpList.Items).To(HaveLen(1))
-	mp := mpList.Items[0]
 	WaitForMachinePoolNodesToExist(ctx, WaitForMachinePoolNodesToExistInput{
 		Getter:      input.Getter,
 		MachinePool: &mp,
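The key change above is moving the HaveLen check and the replica comparison inside the Eventually block, so transient states (the topology controller has not yet written the new .spec.replicas) trigger a retry instead of failing the test. Below is a minimal, self-contained sketch of the same Gomega pattern; the `replicas` counter and the converging goroutine are hypothetical stand-ins for the MachinePool list and the topology controller, not code from this PR.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"

	. "github.com/onsi/gomega"
)

func main() {
	// Stand-alone Gomega instance; in the test framework above this is
	// provided by the suite. The fail handler here simply panics.
	g := NewGomega(func(message string, _ ...int) { panic(message) })

	// Hypothetical stand-in for .spec.replicas as set by the topology
	// controller: it converges to the desired value asynchronously.
	var replicas atomic.Int32
	go func() {
		time.Sleep(100 * time.Millisecond)
		replicas.Store(3)
	}()

	// The pattern the diff adopts: the whole read-and-assert step runs
	// inside Eventually(func(g Gomega) ...), so a failed inner
	// expectation or a wrong intermediate value causes a retry at the
	// polling interval instead of failing the test outright.
	g.Eventually(func(g Gomega) int32 {
		v := replicas.Load()
		g.Expect(v).To(BeNumerically(">=", 0)) // inner check, retried on failure
		return v
	}, 5*time.Second, 10*time.Millisecond).Should(Equal(int32(3)))

	fmt.Println("replicas converged to 3")
}
```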
6 changes: 3 additions & 3 deletions test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go
@@ -378,7 +378,7 @@ func dockerMachineToDockerMachinePool(_ context.Context, o client.Object) []ctrl
 }
 
 // updateStatus updates the Status field for the MachinePool object.
-// It checks for the current state of the replicas and updates the Status of the MachineSet.
+// It checks for the current state of the replicas and updates the Status of the MachinePool.
 func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool, dockerMachines []infrav1.DockerMachine) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
 
@@ -405,10 +405,10 @@ func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster
 	switch {
 	// We are scaling up
 	case readyReplicaCount < desiredReplicas:
-		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachineSet to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
 	// We are scaling down
 	case readyReplicaCount > desiredReplicas:
-		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachineSet to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down MachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
 	default:
 		// Make sure last resize operation is marked as completed.
 		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
