diff --git a/api/v1alpha1/harvestercluster_types.go b/api/v1alpha1/harvestercluster_types.go index 3457a53..9982f2a 100644 --- a/api/v1alpha1/harvestercluster_types.go +++ b/api/v1alpha1/harvestercluster_types.go @@ -81,7 +81,8 @@ type LoadBalancerConfig struct { IPAMType IPAMType `json:"ipamType"` // IpPoolRef is a reference to an existing IpPool object in Harvester's cluster. - // This field is mutually exclusive with "ipPool". //TODO: To be implemented + // This field is mutually exclusive with "ipPool". + //TODO: To be implemented IpPoolRef string `json:"ipPoolRef,omitempty"` // IpPool defines a new IpPool that will be added to Harvester. diff --git a/api/v1alpha1/harvestermachine_types.go b/api/v1alpha1/harvestermachine_types.go index d00bb7e..794921a 100644 --- a/api/v1alpha1/harvestermachine_types.go +++ b/api/v1alpha1/harvestermachine_types.go @@ -30,6 +30,14 @@ const ( MachineFinalizer = "harvestermachine.infrastructure.cluster.x-k8s.io" ) +const ( + // MachineCreatedCondition documents that the machine has been created. + MachineCreatedCondition capiv1beta1.ConditionType = "MachineCreated" + + // MachineNotFoundReason documents that the machine was not found. + MachineNotFoundReason = "MachineNotFound" +) + // HarvesterMachineSpec defines the desired state of HarvesterMachine. type HarvesterMachineSpec struct { // ProviderID will be the ID of the VM in the provider (Harvester). @@ -134,3 +142,13 @@ type HarvesterMachineList struct { func init() { SchemeBuilder.Register(&HarvesterMachine{}, &HarvesterMachineList{}) } + +// GetConditions returns the set of conditions for this object. +func (m *HarvesterMachine) GetConditions() capiv1beta1.Conditions { + return m.Status.Conditions +} + +// SetConditions sets the conditions on this object.
+func (m *HarvesterMachine) SetConditions(conditions capiv1beta1.Conditions) { + m.Status.Conditions = conditions +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 1003329..9870446 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -24,7 +24,6 @@ package v1alpha1 import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" ) diff --git a/controllers/harvestercluster_controller.go b/controllers/harvestercluster_controller.go index 24d82ea..1bcd29f 100644 --- a/controllers/harvestercluster_controller.go +++ b/controllers/harvestercluster_controller.go @@ -362,14 +362,14 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct // The following is executed only if there are ownedCPHarvesterMachines if !conditions.IsTrue(scope.HarvesterCluster, infrav1.LoadBalancerReadyCondition) { - err := createLoadBalancerIfNotExists(scope, ownedCPHarvesterMachines) + err := createLoadBalancerIfNotExists(scope) if err != nil { logger.V(1).Info("could not create the LoadBalancer, requeuing ...") return ctrl.Result{RequeueAfter: 5 * time.Minute}, nil } - lbIP, err := getLoadBalancerIP(scope.Cluster, scope.HarvesterCluster, scope.HarvesterClient) + lbIP, err := getLoadBalancerIP(scope.HarvesterCluster, scope.HarvesterClient) if err != nil { logger.Error(err, "could not get the LoadBalancer IP") @@ -393,7 +393,7 @@ func (r *HarvesterClusterReconciler) ReconcileNormal(scope ClusterScope) (res ct return res, err } -func getLoadBalancerIP(cluster *clusterv1.Cluster, harvesterCluster *infrav1.HarvesterCluster, hvClient *lbclient.Clientset) (string, error) { +func getLoadBalancerIP(harvesterCluster *infrav1.HarvesterCluster, hvClient *lbclient.Clientset) (string, error) { createdLB, err := hvClient.LoadbalancerV1beta1().LoadBalancers(harvesterCluster.Spec.TargetNamespace).Get( context.TODO(),
harvesterCluster.Namespace+"-"+harvesterCluster.Name+"-lb", @@ -484,7 +484,7 @@ func (r *HarvesterClusterReconciler) reconcileHarvesterConfig(ctx context.Contex return hvRESTConfig, nil } -func createLoadBalancerIfNotExists(scope ClusterScope, ownedCPMachines []infrav1.HarvesterMachine) (err error) { +func createLoadBalancerIfNotExists(scope ClusterScope) (err error) { additionalListeners := getListenersFromAPI(scope.HarvesterCluster) lbToCreate := &lbv1beta1.LoadBalancer{ @@ -631,7 +631,15 @@ func (r *HarvesterClusterReconciler) getOwnedCPHarversterMachines(scope ClusterS return []infrav1.HarvesterMachine{}, errors.Wrap(err, "unable to list owned ControlPlane Machines") } - return ownedCPHarvesterMachines.Items, nil + ownedCPHarvesterMachinesReady := make([]infrav1.HarvesterMachine, 0) + + for _, machine := range ownedCPHarvesterMachines.Items { + if machine.Status.Ready { + ownedCPHarvesterMachinesReady = append(ownedCPHarvesterMachinesReady, machine) + } + } + + return ownedCPHarvesterMachinesReady, nil } // ReconcileDelete is the part of the Reconcialiation that deletes a HarvesterCluster and everything which depends on it. 
diff --git a/controllers/harvestermachine_controller.go b/controllers/harvestermachine_controller.go index f5524a9..690ac4b 100644 --- a/controllers/harvestermachine_controller.go +++ b/controllers/harvestermachine_controller.go @@ -314,20 +314,39 @@ func (r *HarvesterMachineReconciler) ReconcileNormal(hvScope Scope) (res reconci return ctrl.Result{}, nil } - logger.Info("No existing VM found in Harvester, creating a new one ...") + if !conditions.IsTrue(hvScope.HarvesterMachine, infrav1.MachineCreatedCondition) { + logger.Info("No existing VM found in Harvester, creating a new one ...") - _, err = createVMFromHarvesterMachine(hvScope) - if err != nil { - logger.Error(err, "unable to create VM from HarvesterMachine information") - } + hvScope.HarvesterMachine.Status.Ready = false + + _, err = createVMFromHarvesterMachine(hvScope) + if err != nil { + logger.Error(err, "unable to create VM from HarvesterMachine information") - // Patch the HarvesterCluster resource with the current conditions. - hvClusterCopy := hvScope.HarvesterCluster.DeepCopy() - conditions.MarkTrue(hvClusterCopy, infrav1.InitMachineCreatedCondition) - hvClusterCopy.Status.Ready = false + return ctrl.Result{}, err + } + + conditions.MarkTrue(hvScope.HarvesterMachine, infrav1.MachineCreatedCondition) + hvScope.HarvesterMachine.Status.Ready = true - if err := r.Client.Status().Patch(hvScope.Ctx, hvClusterCopy, client.MergeFrom(hvScope.HarvesterCluster)); err != nil { - logger.Error(err, "failed to update HarvesterCluster Conditions with InitMachineCreatedCondition") + // Patch the HarvesterCluster resource with the InitMachineCreatedCondition if it is not already set. 
+ if !conditions.IsTrue(hvScope.HarvesterCluster, infrav1.InitMachineCreatedCondition) { + hvClusterCopy := hvScope.HarvesterCluster.DeepCopy() + conditions.MarkTrue(hvClusterCopy, infrav1.InitMachineCreatedCondition) + hvClusterCopy.Status.Ready = false + + if err := r.Client.Status().Patch(hvScope.Ctx, hvClusterCopy, client.MergeFrom(hvScope.HarvesterCluster)); err != nil { + logger.Error(err, "failed to update HarvesterCluster Conditions with InitMachineCreatedCondition") + } + } + } else { + if existingVM == nil || existingVM.Name == "" { + hvScope.HarvesterMachine.Status.Ready = false + conditions.MarkFalse(hvScope.HarvesterMachine, + infrav1.MachineCreatedCondition, infrav1.MachineNotFoundReason, clusterv1.ConditionSeverityError, "VM not found in Harvester") + + return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil + } } return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil @@ -625,19 +644,19 @@ runcmd: if err != nil { if !apierrors.IsNotFound(err) { hvScope.Logger.V(2).Info("unable to get cloud-init secret, error was different than NotFound") //nolint:gomnd + } else { + _, err = hvScope.HarvesterClient.CoreV1().Secrets(hvScope.HarvesterCluster.Spec.TargetNamespace).Create( + context.TODO(), cloudInitSecret, metav1.CreateOptions{}) + if err != nil { + return nil, errors.Wrap(err, "unable to create cloud-init secret") + } + } + } else { + _, err = hvScope.HarvesterClient.CoreV1().Secrets(hvScope.HarvesterCluster.Spec.TargetNamespace).Update( + context.TODO(), cloudInitSecret, metav1.UpdateOptions{}) + if err != nil { + return nil, errors.Wrap(err, "unable to update cloud-init secret") } - } - - _, err = hvScope.HarvesterClient.CoreV1().Secrets(hvScope.HarvesterCluster.Spec.TargetNamespace).Create( - context.TODO(), cloudInitSecret, metav1.CreateOptions{}) - if err != nil { - return nil, errors.Wrap(err, "unable to create cloud-init secret") - } - - if err != nil { - err = fmt.Errorf("error during getting cloud init user data from Harvester: %w", err) - -
return nil, err } vmTemplate = &kubevirtv1.VirtualMachineInstanceTemplateSpec{ @@ -828,11 +847,12 @@ func (r *HarvesterMachineReconciler) ReconcileDelete(hvScope Scope) (res ctrl.Re attachedPVCString := vm.Annotations[vmAnnotationPVC] attachedPVCObj := []*v1.PersistentVolumeClaim{} - err = json.Unmarshal([]byte(attachedPVCString), &attachedPVCObj) - if err != nil { - return ctrl.Result{Requeue: true}, err + if attachedPVCString != "" { + err = json.Unmarshal([]byte(attachedPVCString), &attachedPVCObj) + if err != nil { + return ctrl.Result{Requeue: true}, err + } } - err = hvScope.HarvesterClient.KubevirtV1().VirtualMachines(hvScope.HarvesterCluster.Spec.TargetNamespace).Delete( hvScope.Ctx, hvScope.HarvesterMachine.Name, metav1.DeleteOptions{}) if err != nil {