
Commit

willie-yao committed Aug 15, 2023
1 parent 123a5aa commit 3717198
Showing 8 changed files with 103 additions and 34 deletions.
4 changes: 0 additions & 4 deletions exp/api/v1alpha3/conversion.go
@@ -44,10 +44,6 @@ func Convert_v1alpha3_MachinePool_To_v1beta1_MachinePool(in *MachinePool, out *e
return nil
}

-func Convert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in *expv1.MachinePoolSpec, out *MachinePoolSpec, s apimachineryconversion.Scope) error {
-return autoConvert_v1beta1_MachinePoolSpec_To_v1alpha3_MachinePoolSpec(in, out, s)
-}
-
func Convert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in *expv1.MachinePool, out *MachinePool, s apimachineryconversion.Scope) error {
if err := autoConvert_v1beta1_MachinePool_To_v1alpha3_MachinePool(in, out, s); err != nil {
return err
15 changes: 10 additions & 5 deletions exp/api/v1alpha3/zz_generated.conversion.go

Some generated files are not rendered by default.

5 changes: 0 additions & 5 deletions exp/api/v1alpha4/conversion.go
@@ -17,17 +17,12 @@ limitations under the License.
package v1alpha4

import (
-apimachineryconversion "k8s.io/apimachinery/pkg/conversion"
"sigs.k8s.io/controller-runtime/pkg/conversion"

expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
)

-func Convert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in *expv1.MachinePoolSpec, out *MachinePoolSpec, s apimachineryconversion.Scope) error {
-return autoConvert_v1beta1_MachinePoolSpec_To_v1alpha4_MachinePoolSpec(in, out, s)
-}
-
func (src *MachinePool) ConvertTo(dstRaw conversion.Hub) error {
dst := dstRaw.(*expv1.MachinePool)

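The two deletions above (v1alpha3 here, v1alpha4 just shown) share one shape: each hand-written Convert_* function only forwarded to its generated autoConvert_* counterpart. Below is a minimal, self-contained sketch of that delegation pattern with toy types standing in for the real API structs; the stated rationale is an assumption, namely that the regenerated zz_generated.conversion.go files in this commit now emit the public wrapper themselves, making the manual copies redundant.

// Toy illustration only: autoConvertSpec stands in for a generated
// autoConvert_* function; convertSpec has the exact shape of the deleted
// wrappers. A wrapper that adds no manual field handling can be dropped
// as soon as the generator emits the public function itself.
package conversionsketch

type specV1alpha3 struct{ Replicas int32 }
type specV1beta1 struct{ Replicas int32 }

// autoConvertSpec copies every field mechanically, as conversion-gen would.
func autoConvertSpec(in *specV1beta1, out *specV1alpha3) error {
	out.Replicas = in.Replicas
	return nil
}

// convertSpec is the redundant hand-written layer: pure delegation.
func convertSpec(in *specV1beta1, out *specV1alpha3) error {
	return autoConvertSpec(in, out)
}
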
15 changes: 10 additions & 5 deletions exp/api/v1alpha4/zz_generated.conversion.go

Some generated files are not rendered by default.

19 changes: 13 additions & 6 deletions internal/controllers/topology/cluster/desired_state.go
@@ -75,6 +75,13 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*
}
s.UpgradeTracker.MachineDeployments.MarkUpgrading(mdUpgradingNames...)

+// Mark all the MachinePools that are currently upgrading.
+mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, r.Client)
+if err != nil {
+return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
+}
+s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
+
// Compute the desired state of the ControlPlane object, eventually adding a reference to the
// InfrastructureMachineTemplate generated by the previous step.
if desiredState.ControlPlane.Object, err = r.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
@@ -920,17 +927,17 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c

// Compute the bootstrap template.
currentMachinePool := s.Current.MachinePools[machinePoolTopology.Name]
-var currentBootstrapTemplateRef *corev1.ObjectReference
+var currentBootstrapConfigRef *corev1.ObjectReference
if currentMachinePool != nil && currentMachinePool.BootstrapObject != nil {
-currentBootstrapTemplateRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
+currentBootstrapConfigRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
}
var err error
desiredMachinePool.BootstrapObject, err = templateToObject(templateToInput{
template: machinePoolBlueprint.BootstrapConfig,
templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapConfig),
cluster: s.Current.Cluster,
namePrefix: bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
-currentObjectRef: currentBootstrapTemplateRef,
+currentObjectRef: currentBootstrapConfigRef,
})
if err != nil {
return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name)
@@ -991,9 +998,9 @@
}

// Compute the MachinePool object.
-desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachinePool.BootstrapObject)
+desiredBootstrapConfigRef, err := calculateRefDesiredAPIVersion(currentBootstrapConfigRef, desiredMachinePool.BootstrapObject)
if err != nil {
return nil, errors.Wrap(err, "failed to calculate desired bootstrap template ref")
return nil, errors.Wrap(err, "failed to calculate desired bootstrap config ref")
}
desiredInfraMachineTemplateRef, err := calculateRefDesiredAPIVersion(currentInfraMachineTemplateRef, desiredMachinePool.InfrastructureMachinePoolObject)
if err != nil {
@@ -1016,7 +1023,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
Spec: clusterv1.MachineSpec{
ClusterName: s.Current.Cluster.Name,
Version: pointer.String(version),
-Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef},
+Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef},
InfrastructureRef: *desiredInfraMachineTemplateRef,
NodeDrainTimeout: nodeDrainTimeout,
NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
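The first hunk above mirrors the existing MachineDeployment flow: names of upgrading MachinePools are collected once, then recorded on the tracker for later gating decisions. A hypothetical, minimal sketch of the tracker semantics assumed by MarkUpgrading and UpgradingNames follows; the real UpgradeTracker in the topology scope package carries more state (pending create, pending upgrade, deferred upgrade).

// Hypothetical sketch, not the real scope.UpgradeTracker: an in-memory set
// keyed by MachinePool name, enough to give MarkUpgrading and
// UpgradingNames concrete meaning.
package trackersketch

import "sort"

type machinePoolUpgradeTracker struct {
	upgrading map[string]struct{}
}

func newMachinePoolUpgradeTracker() *machinePoolUpgradeTracker {
	return &machinePoolUpgradeTracker{upgrading: map[string]struct{}{}}
}

// MarkUpgrading records the named MachinePools as currently upgrading.
func (t *machinePoolUpgradeTracker) MarkUpgrading(names ...string) {
	for _, n := range names {
		t.upgrading[n] = struct{}{}
	}
}

// UpgradingNames returns the recorded names in a stable order.
func (t *machinePoolUpgradeTracker) UpgradingNames() []string {
	names := make([]string, 0, len(t.upgrading))
	for n := range t.upgrading {
		names = append(names, n)
	}
	sort.Strings(names)
	return names
}
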
49 changes: 42 additions & 7 deletions internal/controllers/topology/cluster/reconcile_state.go
@@ -257,7 +257,11 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope
len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade
!s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create
!s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() && // No MachineDeployments are pending an upgrade
-!s.UpgradeTracker.MachineDeployments.DeferredUpgrade() { // No MachineDeployments have deferred an upgrade
+!s.UpgradeTracker.MachineDeployments.DeferredUpgrade() && // No MachineDeployments have deferred an upgrade
+len(s.UpgradeTracker.MachinePools.UpgradingNames()) == 0 && // Machine pools are not upgrading or not about to upgrade
+!s.UpgradeTracker.MachinePools.IsAnyPendingCreate() && // No MachinePools are pending create
+!s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() && // No MachinePools are pending an upgrade
+!s.UpgradeTracker.MachinePools.DeferredUpgrade() { // No MachinePools have deferred an upgrade
// Everything is stable and the cluster can be considered fully upgraded.
hookRequest := &runtimehooksv1.AfterClusterUpgradeRequest{
Cluster: *s.Current.Cluster,
@@ -747,7 +751,7 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope)
continue
}

-if err := r.createMachinePool(ctx, s.Current.Cluster, mp); err != nil {
+if err := r.createMachinePool(ctx, s, mp); err != nil {
return err
}
}
@@ -800,9 +804,23 @@ func (r *Reconciler) getCurrentMachinePools(ctx context.Context, s *scope.Scope)
}

// createMachinePool creates a MachinePool and the corresponding templates.
-func (r *Reconciler) createMachinePool(ctx context.Context, cluster *clusterv1.Cluster, mp *scope.MachinePoolState) error {
-log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object)
+func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *scope.MachinePoolState) error {
+// Do not create the MachinePool if it is marked as pending create.
+// This will also block MHC creation because creating the MHC without the corresponding
+// MachinePool is unnecessary.
+mpTopologyName, ok := mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
+if !ok || mpTopologyName == "" {
+// Note: This is only an additional safety check and should not happen. The label will always be added when computing
+// the desired MachinePool.
+return errors.Errorf("new MachinePool is missing the %q label", clusterv1.ClusterTopologyMachinePoolNameLabel)
+}
+// Return early if the MachinePool is pending create.
+if s.UpgradeTracker.MachinePools.IsPendingCreate(mpTopologyName) {
+return nil
+}
+
+log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object)
+cluster := s.Current.Cluster
infraCtx, _ := log.WithObject(mp.InfrastructureMachinePoolObject).Into(ctx)
if err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{
cluster: cluster,
@@ -830,6 +848,23 @@ func (r *Reconciler) createMachinePool(ctx context.Context, cluster *clusterv1.C
}
r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: mp.Object})

+// Wait until MachinePool is visible in the cache.
+// Note: We have to do this because otherwise using a cached client in current state could
+// miss a newly created MachinePool (because the cache might be stale).
+err = wait.PollUntilContextTimeout(ctx, 5*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
+key := client.ObjectKey{Namespace: mp.Object.Namespace, Name: mp.Object.Name}
+if err := r.Client.Get(ctx, key, &expv1.MachinePool{}); err != nil {
+if apierrors.IsNotFound(err) {
+return false, nil
+}
+return false, err
+}
+return true, nil
+})
+if err != nil {
+return errors.Wrapf(err, "failed waiting for MachinePool %s to be visible in the cache after create", mp.Object.Kind)
+}
+
return nil
}

@@ -857,7 +892,7 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, cluster *clusterv1.C
return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
}

-// Check differences between current and desired MachineDeployment, and eventually patch the current object.
+// Check differences between current and desired MachinePool, and eventually patch the current object.
log = log.WithObject(desiredMP.Object)
patchHelper, err := r.patchHelperFactory(ctx, currentMP.Object, desiredMP.Object)
if err != nil {
@@ -904,8 +939,8 @@ type machineDiff struct {
toCreate, toUpdate, toDelete []string
}

// calculateMachineDeploymentDiff compares two maps of MachineDeploymentState and calculates which
// MachineDeployments should be created, updated or deleted.
// calculateMachineDeploymentDiff compares two maps of MachinePoolState and calculates which
// MachinePools should be created, updated or deleted.
func calculateMachineDeploymentDiff(current, desired map[string]*scope.MachineDeploymentState) machineDiff {
var diff machineDiff

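The poll loop added to createMachinePool is a general pattern: right after a create, a cached controller-runtime client may still return NotFound for the new object, so the reconciler waits for cache visibility before returning. Below is a generic sketch of the same pattern; waitForObjectInCache is a name invented here for illustration, not a helper in this commit.

// Sketch under the assumption of a cached controller-runtime client.
package cachewaitsketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForObjectInCache polls until the freshly created object is readable
// through the (possibly stale) cache, so later reconciles do not miss it.
func waitForObjectInCache(ctx context.Context, c client.Client, obj client.Object) error {
	key := client.ObjectKeyFromObject(obj)
	return wait.PollUntilContextTimeout(ctx, 5*time.Millisecond, 5*time.Second, true,
		func(ctx context.Context) (bool, error) {
			if err := c.Get(ctx, key, obj); err != nil {
				if apierrors.IsNotFound(err) {
					return false, nil // not in the cache yet; keep polling
				}
				return false, err
			}
			return true, nil
		})
}
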
7 changes: 6 additions & 1 deletion internal/webhooks/clusterclass.go
@@ -293,7 +293,7 @@ func (webhook *ClusterClass) validateRemovedMachineDeploymentClassesAreNotUsed(c
func (webhook *ClusterClass) validateRemovedMachinePoolClassesAreNotUsed(clusters []clusterv1.Cluster, oldClusterClass, newClusterClass *clusterv1.ClusterClass) field.ErrorList {
var allErrs field.ErrorList

-removedClasses := webhook.removedMachineClasses(oldClusterClass, newClusterClass)
+removedClasses := webhook.removedMachinePoolClasses(oldClusterClass, newClusterClass)
// If no classes have been removed return early as no further checks are needed.
if len(removedClasses) == 0 {
return nil
@@ -322,6 +322,11 @@ func (webhook *ClusterClass) removedMachineClasses(oldClusterClass, newClusterCl
removedClasses.Insert(oldClass.Class)
}
}
+return removedClasses
+}
+
+func (webhook *ClusterClass) removedMachinePoolClasses(oldClusterClass, newClusterClass *clusterv1.ClusterClass) sets.Set[string] {
+removedClasses := sets.Set[string]{}

mpClasses := webhook.classNamesFromMPWorkerClass(newClusterClass.Spec.Workers)
for _, oldClass := range oldClusterClass.Spec.Workers.MachinePools {
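The new removedMachinePoolClasses mirrors the existing removedMachineClasses: collect the class names referenced by the new ClusterClass, then keep every old name missing from that set. A compact sketch of that set difference follows, using the same apimachinery sets package; the function and its plain string-slice inputs are simplifications for illustration, not the webhook's actual signature.

// Sketch of the removed-classes computation over plain name slices.
package classdiffsketch

import "k8s.io/apimachinery/pkg/util/sets"

// removedClasses returns every class name present in old but absent from new.
func removedClasses(oldNames, newNames []string) sets.Set[string] {
	newSet := sets.New[string](newNames...)
	removed := sets.Set[string]{}
	for _, name := range oldNames {
		if !newSet.Has(name) {
			removed.Insert(name)
		}
	}
	return removed
}
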
23 changes: 22 additions & 1 deletion test/extension/handlers/topologymutation/handler.go
@@ -359,13 +359,34 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr
return errors.New("no version variables found for DockerMachineTemplate patch")
}

-// patchDockerMachineTemplate patches the DockerMachinePoolTemplate.
+// patchDockerMachinePoolTemplate patches the DockerMachinePoolTemplate.
// It sets the CustomImage to an image for the version in use by the MachinePool.
// NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it
// as example of version aware patches.
func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error {
log := ctrl.LoggerFrom(ctx)

+// If the DockerMachinePoolTemplate belongs to the ControlPlane, set the images using the ControlPlane version.
+// NOTE: ControlPlane version might be different than Cluster.version or MachinePool's versions;
+// the builtin variables provide the right version to use.
+// NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to the ControlPlane.
+cpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version")
+if err != nil {
+return errors.Wrap(err, "could not set customImage to control plane dockerMachinePoolTemplate")
+}
+if found {
+semVer, err := version.ParseMajorMinorPatchTolerant(cpVersion)
+if err != nil {
+return errors.Wrap(err, "could not parse control plane version")
+}
+kindMapping := kind.GetMapping(semVer, "")
+
+log.Info(fmt.Sprintf("Setting MachinePool custom image to %q", kindMapping.Image))
+dockerMachinePoolTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image
+// return early if we have successfully patched a control plane dockerMachineTemplate
+return nil
+}
+
// If the DockerMachinePoolTemplate belongs to a MachinePool, set the images using the MachinePool version.
// NOTE: MachinePool version might be different than Cluster.version or other MachinePool's versions;
// the builtin variables provide the right version to use.
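The control-plane branch added above works by probing for a builtin variable that exists only on templates linked to the ControlPlane, then falling through to the MachinePool's own version. A condensed sketch of that selection follows, reusing the helpers the handler already imports; resolveCustomImage and the exact builtin.machinePool.version variable name are assumptions made here for illustration.

// Sketch assuming the helper packages used by the test extension handler.
package handlersketch

import (
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"sigs.k8s.io/cluster-api/exp/runtime/topologymutation"
	"sigs.k8s.io/cluster-api/test/infrastructure/kind"
	"sigs.k8s.io/cluster-api/util/version"
)

// resolveCustomImage picks the kind node image for the first version builtin
// that is present, preferring the ControlPlane's over the MachinePool's.
func resolveCustomImage(vars map[string]apiextensionsv1.JSON) (string, error) {
	for _, name := range []string{"builtin.controlPlane.version", "builtin.machinePool.version"} {
		v, found, err := topologymutation.GetStringVariable(vars, name)
		if err != nil {
			return "", err
		}
		if !found {
			continue
		}
		semVer, err := version.ParseMajorMinorPatchTolerant(v)
		if err != nil {
			return "", err
		}
		return kind.GetMapping(semVer, "").Image, nil
	}
	return "", nil // no version builtin found; leave the template unpatched
}
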
