diff --git a/Makefile b/Makefile index 0c173f8b18..f06970aa2a 100644 --- a/Makefile +++ b/Makefile @@ -320,6 +320,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/pci > $(E2E_TEMPLATE_DIR)/main/cluster-template-pci.yaml # for DHCP overrides "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/dhcp-overrides > $(E2E_TEMPLATE_DIR)/main/cluster-template-dhcp-overrides.yaml + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/ownerreferences > $(E2E_TEMPLATE_DIR)/main/cluster-template-ownerreferences.yaml ## -------------------------------------- diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go index 771e34ac51..0596928fda 100644 --- a/controllers/vspherecluster_reconciler.go +++ b/controllers/vspherecluster_reconciler.go @@ -285,38 +285,46 @@ func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capv func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error { vsphereCluster := clusterCtx.VSphereCluster - if identity.IsSecretIdentity(vsphereCluster) { - secret := &corev1.Secret{} - secretKey := client.ObjectKey{ - Namespace: vsphereCluster.Namespace, - Name: vsphereCluster.Spec.IdentityRef.Name, - } - err := clusterCtx.Client.Get(ctx, secretKey, secret) - if err != nil { - return err - } + if !identity.IsSecretIdentity(vsphereCluster) { + return nil + } + secret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: vsphereCluster.Namespace, + Name: vsphereCluster.Spec.IdentityRef.Name, + } + err := clusterCtx.Client.Get(ctx, secretKey, secret) + if err != nil { + return err + } - // check if cluster is already an owner - if !clusterutilv1.IsOwnedByObject(secret, vsphereCluster) { - ownerReferences := secret.GetOwnerReferences() - if identity.IsOwnedByIdentityOrCluster(ownerReferences) { - return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name) - } - ownerReferences = append(ownerReferences, metav1.OwnerReference{ - APIVersion: infrav1.GroupVersion.String(), - Kind: vsphereCluster.Kind, - Name: vsphereCluster.Name, - UID: vsphereCluster.UID, - }) - secret.SetOwnerReferences(ownerReferences) - } - if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { - ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) - } - err = r.Client.Update(ctx, secret) - if err != nil { - return err - } + // If a different VSphereCluster is an owner return an error. + if !clusterutilv1.IsOwnedByObject(secret, vsphereCluster) && identity.IsOwnedByIdentityOrCluster(secret.GetOwnerReferences()) { + return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name) + } + + helper, err := patch.NewHelper(secret, clusterCtx.Client) + if err != nil { + return err + } + + // Ensure the VSphereCluster is an owner and that the APIVersion is up to date. + secret.SetOwnerReferences(clusterutilv1.EnsureOwnerRef(secret.GetOwnerReferences(), + metav1.OwnerReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: vsphereCluster.Kind, + Name: vsphereCluster.Name, + UID: vsphereCluster.UID, + }, + )) + + // Ensure the finalizer is added. 
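+	// The finalizer keeps the credential Secret from being deleted while this VSphereCluster still references it, and the patch helper created above sends only the changed ownerReferences and finalizers rather than a full object update.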
+ if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { + ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) + } + err = helper.Patch(ctx, secret) + if err != nil { + return err } return nil diff --git a/controllers/vsphereclusteridentity_controller.go b/controllers/vsphereclusteridentity_controller.go index d025b4de5f..626938f559 100644 --- a/controllers/vsphereclusteridentity_controller.go +++ b/controllers/vsphereclusteridentity_controller.go @@ -136,30 +136,30 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. return reconcile.Result{}, errors.Errorf("secret: %s not found in namespace: %s", secretKey.Name, secretKey.Namespace) } - if !clusterutilv1.IsOwnedByObject(secret, identity) { - ownerReferences := secret.GetOwnerReferences() - if pkgidentity.IsOwnedByIdentityOrCluster(ownerReferences) { - conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretAlreadyInUseReason, clusterv1.ConditionSeverityError, "secret being used by another Cluster/VSphereIdentity") - identity.Status.Ready = false - return reconcile.Result{}, errors.New("secret being used by another Cluster/VSphereIdentity") - } - - ownerReferences = append(ownerReferences, metav1.OwnerReference{ - APIVersion: infrav1.GroupVersion.String(), - Kind: identity.Kind, - Name: identity.Name, - UID: identity.UID, - }) - secret.SetOwnerReferences(ownerReferences) + // If this secret is owned by a different VSphereClusterIdentity or a VSphereCluster, mark the identity as not ready and return an error. + if !clusterutilv1.IsOwnedByObject(secret, identity) && pkgidentity.IsOwnedByIdentityOrCluster(secret.GetOwnerReferences()) { + conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretAlreadyInUseReason, clusterv1.ConditionSeverityError, "secret being used by another Cluster/VSphereIdentity") + identity.Status.Ready = false + return reconcile.Result{}, errors.New("secret being used by another Cluster/VSphereIdentity") + } - if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { - ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) - } - err = r.Client.Update(ctx, secret) - if err != nil { - conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretOwnerReferenceFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - return reconcile.Result{}, err - } + // Ensure the VSphereClusterIdentity is set as the owner of the secret, and that the reference has an up to date APIVersion. 
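+	// EnsureOwnerRef appends the reference when it is missing and otherwise replaces the matching entry in place, so a reference recorded with an older apiVersion is refreshed instead of duplicated.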
+ secret.SetOwnerReferences( + clusterutilv1.EnsureOwnerRef(secret.GetOwnerReferences(), + metav1.OwnerReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: identity.Kind, + Name: identity.Name, + UID: identity.UID, + })) + + if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { + ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) + } + err = r.Client.Update(ctx, secret) + if err != nil { + conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretOwnerReferenceFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + return reconcile.Result{}, err } conditions.MarkTrue(identity, infrav1.CredentialsAvailableCondidtion) diff --git a/controllers/vspheredeploymentzone_controller.go b/controllers/vspheredeploymentzone_controller.go index 77a59a7a53..f3a04822f3 100644 --- a/controllers/vspheredeploymentzone_controller.go +++ b/controllers/vspheredeploymentzone_controller.go @@ -182,26 +182,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileNormal(deploymentZoneCtx *capv deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) return errors.Wrapf(err, "failed to reconcile failure domain") } - conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) - - // Ensure the VSphereDeploymentZone is marked as an owner of the VSphereFailureDomain. - if !clusterutilv1.HasOwnerRef(deploymentZoneCtx.VSphereFailureDomain.GetOwnerReferences(), metav1.OwnerReference{ - APIVersion: infrav1.GroupVersion.String(), - Kind: "VSphereDeploymentZone", - Name: deploymentZoneCtx.VSphereDeploymentZone.Name, - }) { - if err := updateOwnerReferences(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain, r.Client, func() []metav1.OwnerReference { - return append(deploymentZoneCtx.VSphereFailureDomain.OwnerReferences, metav1.OwnerReference{ - APIVersion: infrav1.GroupVersion.String(), - Kind: deploymentZoneCtx.VSphereDeploymentZone.Kind, - Name: deploymentZoneCtx.VSphereDeploymentZone.Name, - UID: deploymentZoneCtx.VSphereDeploymentZone.UID, - }) - }); err != nil { - return err - } - } + // Mark the deployment zone as ready. deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(true) return nil } diff --git a/controllers/vspheredeploymentzone_controller_domain.go b/controllers/vspheredeploymentzone_controller_domain.go index 455bbe51f2..5759b02825 100644 --- a/controllers/vspheredeploymentzone_controller_domain.go +++ b/controllers/vspheredeploymentzone_controller_domain.go @@ -18,8 +18,10 @@ package controllers import ( "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" @@ -58,6 +60,24 @@ func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(deploymentZoneCt logger.Error(err, "topology is not configured correctly") return errors.Wrap(err, "topology is not configured correctly") } + + // Ensure the VSphereDeploymentZone is marked as an owner of the VSphereFailureDomain. 
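+	// Unlike the previous HasOwnerRef check, EnsureOwnerRef is idempotent, so the reference is re-applied on every reconcile and a stale apiVersion gets corrected.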
+ if err := updateOwnerReferences(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain, r.Client, + func() []metav1.OwnerReference { + return clusterutilv1.EnsureOwnerRef( + deploymentZoneCtx.VSphereFailureDomain.OwnerReferences, + metav1.OwnerReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: deploymentZoneCtx.VSphereDeploymentZone.Kind, + Name: deploymentZoneCtx.VSphereDeploymentZone.Name, + UID: deploymentZoneCtx.VSphereDeploymentZone.UID, + }) + }); err != nil { + return err + } + + // Mark the VSphereDeploymentZone as having a valid VSphereFailureDomain. + conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) return nil } diff --git a/controllers/vspheremachine_controller.go b/controllers/vspheremachine_controller.go index 7d15151f60..9edc24518c 100644 --- a/controllers/vspheremachine_controller.go +++ b/controllers/vspheremachine_controller.go @@ -263,7 +263,7 @@ func (r *machineReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ct } func (r *machineReconciler) reconcileDelete(machineCtx capvcontext.MachineContext) (reconcile.Result, error) { - machineCtx.GetLogger().Info("Handling deleted SphereMachine") + machineCtx.GetLogger().Info("Handling deleted VSphereMachine") conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") if err := r.VMService.ReconcileDelete(machineCtx); err != nil { diff --git a/pkg/identity/identity.go b/pkg/identity/identity.go index ced2487c92..b4a09d04d7 100644 --- a/pkg/identity/identity.go +++ b/pkg/identity/identity.go @@ -122,11 +122,11 @@ func validateInputs(c client.Client, cluster *infrav1.VSphereCluster) error { return nil } +// IsSecretIdentity returns true if the VSphereCluster identity is a Secret. 
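+// It returns false when no identityRef is set or when the identityRef points at a VSphereClusterIdentity rather than a Secret.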
func IsSecretIdentity(cluster *infrav1.VSphereCluster) bool { if cluster == nil || cluster.Spec.IdentityRef == nil { return false } - return cluster.Spec.IdentityRef.Kind == infrav1.SecretKind } diff --git a/test/e2e/README.md b/test/e2e/README.md index 3fe3b54b87..06591600f0 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -14,36 +14,36 @@ In order to run the e2e tests the following requirements must be met: * The testing must occur on a host that can access the VMs deployed to vSphere via the network * Ginkgo ([download](https://onsi.github.io/ginkgo/#getting-ginkgo)) * Docker ([download](https://www.docker.com/get-started)) -* Kind v0.7.0+ ([download](https://kind.sigs.k8s.io)) +* Kind v0.20.0+ ([download](https://kind.sigs.k8s.io)) ### Environment variables The first step to running the e2e tests is setting up the required environment variables: -| Environment variable | Description | Example | -| ----------------------------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` | -| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` | -| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` | -| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` | -| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` | -| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` | -| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datstore/my-datastore` | -| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` | -| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` | -| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` | -| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` | -| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` | -| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` | +| Environment variable | Description | Example | +|------------------------------|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------| +| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` | +| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` | +| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` | +| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` | +| `VSPHERE_FOLDER` | The 
unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` | +| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` | +| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datstore/my-datastore` | +| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` | +| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` | +| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` | +| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` | +| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` | +| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` | ### Flags | Flag | Description | Default Value | -|-------------------------|----------------------------------------------------------------------------------------------------------|-----------| -| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` | -| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` | -| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` | -| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` | +|-------------------------|----------------------------------------------------------------------------------------------------------|---------------| +| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` | +| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` | +| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` | +| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` | ### Running the e2e tests diff --git a/test/e2e/capv_quick_start_test.go b/test/e2e/capv_quick_start_test.go deleted file mode 100644 index 3a462d283b..0000000000 --- a/test/e2e/capv_quick_start_test.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - . 
"github.com/onsi/ginkgo/v2" - capi_e2e "sigs.k8s.io/cluster-api/test/e2e" -) - -var _ = Describe("Cluster Creation using Cluster API quick-start test", func() { - capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { - return capi_e2e.QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - } - }) -}) diff --git a/test/e2e/config/vsphere-ci.yaml b/test/e2e/config/vsphere-ci.yaml index 507192a52a..6ae945aa7d 100644 --- a/test/e2e/config/vsphere-ci.yaml +++ b/test/e2e/config/vsphere-ci.yaml @@ -8,11 +8,11 @@ # For creating local images, run ./hack/e2e.sh images: - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.1 loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capv-manager:e2e loadBehavior: mustLoad @@ -28,9 +28,9 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -42,9 +42,9 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/bootstrap-components.yaml" type: "url" contract: v1beta1 files: @@ -56,9 +56,9 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/control-plane-components.yaml" type: "url" contract: v1beta1 files: @@ -92,6 +92,7 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-storage-policy.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-topology.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-dhcp-overrides.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ownerreferences.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ignition.yaml" - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" @@ -106,6 +107,7 @@ variables: WORKER_MACHINE_COUNT: 1 IP_FAMILY: "IPv4" CLUSTER_CLASS_NAME: "quick-start" + VSPHERE_COMPUTE_CLUSTER: "Cluster-1" VSPHERE_DATACENTER: "SDDC-Datacenter" VSPHERE_FOLDER: "clusterapi" VSPHERE_RESOURCE_POOL: "clusterapi" diff --git a/test/e2e/config/vsphere-dev.yaml b/test/e2e/config/vsphere-dev.yaml index 
6cf1e7a0bf..8c045c50ac 100644 --- a/test/e2e/config/vsphere-dev.yaml +++ b/test/e2e/config/vsphere-dev.yaml @@ -11,11 +11,11 @@ # - from the CAPV repository root, `make e2e` to build the vsphere provider image and run e2e tests. images: - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.1 loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capv-manager:e2e loadBehavior: mustLoad @@ -31,9 +31,9 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -45,9 +45,9 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/bootstrap-components.yaml" type: "url" contract: v1beta1 files: @@ -59,9 +59,9 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/control-plane-components.yaml" type: "url" contract: v1beta1 files: @@ -97,6 +97,7 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-dhcp-overrides.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/clusterclass-quick-start.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ignition.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere/main/cluster-template-ownerreferences.yaml" - sourcePath: "../data/shared/main/v1beta1_provider/metadata.yaml" variables: @@ -113,6 +114,7 @@ variables: VSPHERE_SERVER: "vcenter.vmware.com" VSPHERE_TLS_THUMBPRINT: "AA:BB:CC:DD:11:22:33:44:EE:FF" VSPHERE_DATACENTER: "SDDC-Datacenter" + VSPHERE_COMPUTE_CLUSTER: "cluster0" VSPHERE_FOLDER: "FolderName" VSPHERE_RESOURCE_POOL: "ResourcePool" VSPHERE_DATASTORE: "WorkloadDatastore" diff --git a/test/e2e/data/infrastructure-vsphere/main/ownerreferences/cluster-identity.yaml b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/cluster-identity.yaml new file mode 100644 index 0000000000..efbd83e992 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/cluster-identity.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereClusterIdentity +metadata: + name: ownerreferences +spec: + secretName: ownerreferences + allowedNamespaces: + selector: + matchLabels: + kubernetes.io/metadata.name: '${NAMESPACE}' \ No newline at end of file diff --git 
a/test/e2e/data/infrastructure-vsphere/main/ownerreferences/drop-existing-identity-secret.yaml b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/drop-existing-identity-secret.yaml new file mode 100644 index 0000000000..ce43e72266 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/drop-existing-identity-secret.yaml @@ -0,0 +1,7 @@ +# This secret is not needed. This cluster uses a ClusterIdentity instead +$patch: delete +apiVersion: v1 +kind: Secret +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} diff --git a/test/e2e/data/infrastructure-vsphere/main/ownerreferences/failure-domains.yaml b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/failure-domains.yaml new file mode 100644 index 0000000000..6fcabaa219 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/failure-domains.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereFailureDomain +metadata: + name: "ownerreferences" +spec: + region: + name: '${VSPHERE_DATACENTER}' + type: Datacenter + tagCategory: k8s-region +# autoConfigure: true + zone: + name: '${VSPHERE_COMPUTE_CLUSTER}' + type: ComputeCluster + tagCategory: k8s-zone +# autoConfigure: true + topology: + datacenter: '${VSPHERE_DATACENTER}' + # datastore is optional and should\can be set when only one compute cluster is set + # or we should use storage policy + computeCluster: '${VSPHERE_COMPUTE_CLUSTER}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereDeploymentZone +metadata: + name: "ownerreferences" +spec: + server: '${VSPHERE_SERVER}' + failureDomain: "ownerreferences" + placementConstraint: + resourcePool: '${VSPHERE_RESOURCE_POOL}' \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere/main/ownerreferences/kustomization.yaml b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/kustomization.yaml new file mode 100644 index 0000000000..c05d7d011f --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../base + - cluster-identity.yaml + - failure-domains.yaml +patchesStrategicMerge: + - ../commons/cluster-resource-set-label.yaml + - ../commons/cluster-network-CIDR.yaml + - ../commons/cluster-resource-set-csi-insecure.yaml + - vsphereclusteridentity.yaml + - drop-existing-identity-secret.yaml diff --git a/test/e2e/data/infrastructure-vsphere/main/ownerreferences/vsphereclusteridentity.yaml b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/vsphereclusteridentity.yaml new file mode 100644 index 0000000000..9c5f80fd28 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere/main/ownerreferences/vsphereclusteridentity.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + identityRef: + kind: VSphereClusterIdentity + name: ownerreferences + diff --git a/test/e2e/ignition_test.go b/test/e2e/ignition_test.go deleted file mode 100644 index a84ed6c00c..0000000000 --- a/test/e2e/ignition_test.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" - capi_e2e "sigs.k8s.io/cluster-api/test/e2e" -) - -var _ = Describe("Cluster creation with [Ignition] bootstrap [PR-Blocking]", func() { - capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { - return capi_e2e.QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("ignition"), - } - }) -}) diff --git a/test/e2e/ownerreference_test.go b/test/e2e/ownerreference_test.go new file mode 100644 index 0000000000..98af07814c --- /dev/null +++ b/test/e2e/ownerreference_test.go @@ -0,0 +1,290 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/pointer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util/patch" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" +) + +var _ = Describe("OwnerReference checks with FailureDomains and ClusterIdentity", func() { + // Before running the test create the secret used by the VSphereClusterIdentity to connect to the vCenter. + BeforeEach(func() { + createVsphereIdentitySecret(ctx, bootstrapClusterProxy) + }) + + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("ownerreferences"), + PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { + // Inject a client to use for checkClusterIdentitySecretOwnerRef + checkClusterIdentitySecretOwnerRef(ctx, proxy.GetClient()) + + // Set up a periodic patch to ensure the DeploymentZone is reconciled. 
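+			// forcePeriodicReconcile (defined below) patches an annotation onto the VSphereDeploymentZones and ClusterResourceSets every 20 seconds so their controllers keep re-reconciling while the owner reference checks run.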
+ forcePeriodicReconcile(ctx, proxy.GetClient(), namespace) + + // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. + framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, + framework.CoreOwnerReferenceAssertion, + framework.KubeadmBootstrapOwnerReferenceAssertions, + framework.KubeadmControlPlaneOwnerReferenceAssertions, + framework.ExpOwnerReferenceAssertions, + VSphereKubernetesReferenceAssertions, + VSphereReferenceAssertions, + ) + // This check ensures that owner references are always updated to the most recent apiVersion. + framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, + framework.CoreOwnerReferenceAssertion, + framework.KubeadmBootstrapOwnerReferenceAssertions, + framework.KubeadmControlPlaneOwnerReferenceAssertions, + framework.ExpOwnerReferenceAssertions, + VSphereKubernetesReferenceAssertions, + VSphereReferenceAssertions, + ) + }, + } + }) + + // Delete objects created by the test which are not in the test namespace. + AfterEach(func() { + cleanupVSphereObjects(ctx, bootstrapClusterProxy) + }) + +}) + +var ( + VSphereKubernetesReferenceAssertions = map[string]func([]metav1.OwnerReference) error{ + // Need custom Kubernetes assertions for secrets. Secrets in the CAPV tests can also be owned by the vSphereCluster. + "Secret": func(owners []metav1.OwnerReference) error { + return framework.HasOneOfExactOwners(owners, + // Secrets for cluster certificates must be owned by the KubeadmControlPlane. + []metav1.OwnerReference{kubeadmControlPlaneController}, + // The bootstrap secret should be owned by a KubeadmConfig. + []metav1.OwnerReference{kubeadmConfigController}, + // Secrets created as a resource for a ClusterResourceSet can be owned by the ClusterResourceSet. + []metav1.OwnerReference{clusterResourceSetOwner}, + // Secrets created as an identityReference for a vSphereCluster should be owned by the vSphereCluster. + []metav1.OwnerReference{vSphereClusterOwner}, + ) + }, + "ConfigMap": func(owners []metav1.OwnerReference) error { + // The only configMaps considered here are those owned by a ClusterResourceSet. + return framework.HasExactOwners(owners, clusterResourceSetOwner) + }, + } +) + +var ( + VSphereReferenceAssertions = map[string]func([]metav1.OwnerReference) error{ + "VSphereCluster": func(owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, clusterController) + }, + "VSphereClusterTemplate": func(owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, clusterClassOwner) + }, + "VSphereMachine": func(owners []metav1.OwnerReference) error { + // The vSphereCluster takes ownership of all vSphereMachines in addition to the core Machine. + return framework.HasExactOwners(owners, vSphereClusterOwner, machineController) + }, + "VSphereMachineTemplate": func(owners []metav1.OwnerReference) error { + // The vSphereMachineTemplate can be owned by the Cluster or the ClusterClass. + return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterOwner}, []metav1.OwnerReference{clusterClassOwner}) + }, + "VSphereVM": func(owners []metav1.OwnerReference) error { + return framework.HasExactOwners(owners, vSphereMachineOwner) + }, + // VSphereClusterIdentity does not have any owners. + "VSphereClusterIdentity": func(owners []metav1.OwnerReference) error { + // The vSphereClusterIdentity does not have any owners.
+ return framework.HasExactOwners(owners) + }, + "VSphereDeploymentZone": func(owners []metav1.OwnerReference) error { + // The vSphereDeploymentZone does not have any owners. + return framework.HasExactOwners(owners) + }, + "VSphereFailureDomain": func(owners []metav1.OwnerReference) error { + // The vSphereFailureDomain can be owned by one or more vSphereDeploymentZones. + return framework.HasOneOfExactOwners(owners, []metav1.OwnerReference{vSphereDeploymentZoneOwner}, []metav1.OwnerReference{vSphereDeploymentZoneOwner, vSphereDeploymentZoneOwner}) + }, + } +) + +var ( + // CAPV owners. + vSphereMachineOwner = metav1.OwnerReference{Kind: "VSphereMachine", APIVersion: infrav1.GroupVersion.String()} + vSphereClusterOwner = metav1.OwnerReference{Kind: "VSphereCluster", APIVersion: infrav1.GroupVersion.String()} + vSphereDeploymentZoneOwner = metav1.OwnerReference{Kind: "VSphereDeploymentZone", APIVersion: infrav1.GroupVersion.String()} + vSphereClusterIdentityOwner = metav1.OwnerReference{Kind: "VSphereClusterIdentity", APIVersion: infrav1.GroupVersion.String()} + + // CAPI owners. + clusterClassOwner = metav1.OwnerReference{Kind: "ClusterClass", APIVersion: clusterv1.GroupVersion.String()} + clusterOwner = metav1.OwnerReference{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()} + clusterController = metav1.OwnerReference{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), Controller: pointer.Bool(true)} + machineController = metav1.OwnerReference{Kind: "Machine", APIVersion: clusterv1.GroupVersion.String(), Controller: pointer.Bool(true)} + clusterResourceSetOwner = metav1.OwnerReference{Kind: "ClusterResourceSet", APIVersion: addonsv1.GroupVersion.String()} + + // KCP owner. + kubeadmControlPlaneController = metav1.OwnerReference{Kind: "KubeadmControlPlane", APIVersion: controlplanev1.GroupVersion.String(), Controller: pointer.Bool(true)} + + // CAPBK owner. + kubeadmConfigController = metav1.OwnerReference{Kind: "KubeadmConfig", APIVersion: bootstrapv1.GroupVersion.String(), Controller: pointer.Bool(true)} +) + +// The following names are hardcoded in templates to make cleanup easier. +var ( + clusterIdentityName = "ownerreferences" + clusterIdentitySecretNamespace = "capv-system" + deploymentZoneName = "ownerreferences" +) + +// cleanupVSphereObjects deletes the Secret, VSphereClusterIdentity, and VSphereDeploymentZone created for this test. +// The VSphereFailureDomain, and the Secret for the VSphereClusterIdentity should be deleted as a result of the above. 
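+// Deletes are wrapped in Eventually and ignore NotFound errors, so cleanup tolerates objects that are already gone.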
+func cleanupVSphereObjects(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy) bool { + Eventually(func() error { + if err := bootstrapClusterProxy.GetClient().Delete(ctx, + &infrav1.VSphereClusterIdentity{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterIdentityName, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return err + } + if err := bootstrapClusterProxy.GetClient().Delete(ctx, + &infrav1.VSphereDeploymentZone{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentZoneName, + }, + }); err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil + }).Should(Succeed()) + return true +} + +func createVsphereIdentitySecret(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy) { + username := e2eConfig.GetVariable("VSPHERE_USERNAME") + password := e2eConfig.GetVariable("VSPHERE_PASSWORD") + Expect(username).To(Not(BeEmpty())) + Expect(password).To(Not(BeEmpty())) + Expect(bootstrapClusterProxy.GetClient().Create(ctx, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: clusterIdentitySecretNamespace, + Name: clusterIdentityName, + }, + Data: map[string][]byte{ + "password": []byte(password), + "username": []byte(username), + }, + })).To(Succeed()) +} + +func checkClusterIdentitySecretOwnerRef(ctx context.Context, c ctrlclient.Client) { + s := &corev1.Secret{} + + Eventually(func() error { + if err := c.Get(ctx, ctrlclient.ObjectKey{Namespace: clusterIdentitySecretNamespace, Name: clusterIdentityName}, s); err != nil { + return err + } + return framework.HasExactOwners(s.GetOwnerReferences(), vSphereClusterIdentityOwner) + }, 1*time.Minute).Should(Succeed()) + + // Patch the secret to have a wrong APIVersion. + helper, err := patch.NewHelper(s, c) + Expect(err).ToNot(HaveOccurred()) + newOwners := []metav1.OwnerReference{} + for _, owner := range s.GetOwnerReferences() { + var gv schema.GroupVersion + gv, err := schema.ParseGroupVersion(owner.APIVersion) + Expect(err).ToNot(HaveOccurred()) + gv.Version = "v1alpha1" + owner.APIVersion = gv.String() + newOwners = append(newOwners, owner) + } + s.SetOwnerReferences(newOwners) + Expect(helper.Patch(ctx, s)).To(Succeed()) + + // Force reconcile the ClusterIdentity which owns the secret. + annotationPatch := ctrlclient.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339)))) + Expect(c.Patch(ctx, &infrav1.VSphereClusterIdentity{ObjectMeta: metav1.ObjectMeta{Name: clusterIdentityName}}, annotationPatch)).To(Succeed()) + + // Check that the secret ownerReferences were correctly reconciled. + Eventually(func() error { + if err := c.Get(ctx, ctrlclient.ObjectKey{Namespace: clusterIdentitySecretNamespace, Name: clusterIdentityName}, s); err != nil { + return err + } + return framework.HasExactOwners(s.GetOwnerReferences(), vSphereClusterIdentityOwner) + }, 5*time.Minute).Should(Succeed()) +} + +// forcePeriodicReconcile forces the vSphereDeploymentZone and ClusterResourceSets to reconcile every 20 seconds. +// This reduces the chance of race conditions resulting in flakes in the test. 
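+// It patches a `cluster.x-k8s.io/modifiedAt` annotation onto the VSphereDeploymentZones and the namespace's ClusterResourceSets every 20 seconds, stopping after five minutes or when the test context is cancelled.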
+func forcePeriodicReconcile(ctx context.Context, c ctrlclient.Client, namespace string) { + deploymentZoneList := &infrav1.VSphereDeploymentZoneList{} + crsList := &addonsv1.ClusterResourceSetList{} + ticker := time.NewTicker(20 * time.Second) + stopTimer := time.NewTimer(5 * time.Minute) + go func() { + for { + select { + case <-ticker.C: + Expect(c.List(ctx, deploymentZoneList)).To(Succeed()) + for _, zone := range deploymentZoneList.Items { + annotationPatch := ctrlclient.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339)))) + Expect(c.Patch(ctx, zone.DeepCopy(), annotationPatch)).To(Succeed()) + } + Expect(c.List(ctx, crsList, ctrlclient.InNamespace(namespace))).To(Succeed()) + for _, crs := range crsList.Items { + annotationPatch := ctrlclient.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339)))) + Expect(c.Patch(ctx, crs.DeepCopy(), annotationPatch)).To(Succeed()) + } + case <-stopTimer.C: + ticker.Stop() + return + case <-ctx.Done(): + ticker.Stop() + return + } + } + }() +} diff --git a/test/e2e/capv_clusterclass_quickstart_test.go b/test/e2e/quick_start_test.go similarity index 55% rename from test/e2e/capv_clusterclass_quickstart_test.go rename to test/e2e/quick_start_test.go index b44851fa7c..ff3a213990 100644 --- a/test/e2e/capv_clusterclass_quickstart_test.go +++ b/test/e2e/quick_start_test.go @@ -1,5 +1,5 @@ /* -Copyright 2022 The Kubernetes Authors. +Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,6 +22,18 @@ import ( capi_e2e "sigs.k8s.io/cluster-api/test/e2e" ) +var _ = Describe("Cluster Creation using Cluster API quick-start test", func() { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) +}) + var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-Blocking] [ClusterClass]", func() { capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ @@ -34,3 +46,16 @@ var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-B } }) }) + +var _ = Describe("Cluster creation with [Ignition] bootstrap [PR-Blocking]", func() { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("ignition"), + } + }) +}) diff --git a/test/integration/integration-dev.yaml b/test/integration/integration-dev.yaml index fc037d2828..e400958333 100644 --- a/test/integration/integration-dev.yaml +++ b/test/integration/integration-dev.yaml @@ -11,11 +11,11 @@ # - from the CAPV repository root, `make e2e` to build the vsphere provider image and run e2e tests. 
images: - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.5.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.5.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.0 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.5.1 loadBehavior: tryLoad - name: gcr.io/k8s-staging-cluster-api/capv-manager:e2e loadBehavior: mustLoad @@ -30,9 +30,9 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/core-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/core-components.yaml" type: "url" contract: v1beta1 files: @@ -44,9 +44,9 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/bootstrap-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/bootstrap-components.yaml" type: "url" contract: v1beta1 files: @@ -58,9 +58,9 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v1.5.0 + - name: v1.5.1 # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.0/control-plane-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.5.1/control-plane-components.yaml" type: "url" contract: v1beta1 files: