Merge pull request #2209 from killianmuldoon/pr-ownerRef-stability
🌱 Add ownerReference resilience test
k8s-ci-robot authored Sep 13, 2023
2 parents 2f2db52 + d63e2bc commit 987e3fe
Showing 20 changed files with 526 additions and 194 deletions.
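
The controller changes below make ownerReference reconciliation idempotent, so references stripped from CAPV objects (for example by a backup and restore tool) are written back on the next reconcile, and the new e2e template exercises exactly that. As a rough sketch only (this is not the test added by the PR, and the helper name and the way the objects are gathered are placeholders), such a resilience check can be expressed with a plain controller-runtime client:

```go
package e2e

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// assertOwnerReferenceResilience removes the ownerReferences from the given
// objects and then waits for the owning controllers to write them back.
// The list of objects to check is supplied by the caller.
func assertOwnerReferenceResilience(ctx context.Context, c client.Client, objs []client.Object) error {
	for _, obj := range objs {
		obj.SetOwnerReferences(nil)
		if err := c.Update(ctx, obj); err != nil {
			return err
		}
	}
	// Poll until every object has at least one ownerReference again.
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 3*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			for _, obj := range objs {
				if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
					return false, err
				}
				if len(obj.GetOwnerReferences()) == 0 {
					return false, nil
				}
			}
			return true, nil
		})
}
```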
1 change: 1 addition & 0 deletions Makefile
@@ -320,6 +320,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/pci > $(E2E_TEMPLATE_DIR)/main/cluster-template-pci.yaml
# for DHCP overrides
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/dhcp-overrides > $(E2E_TEMPLATE_DIR)/main/cluster-template-dhcp-overrides.yaml
"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build $(E2E_TEMPLATE_DIR)/main/ownerreferences > $(E2E_TEMPLATE_DIR)/main/cluster-template-ownerreferences.yaml


## --------------------------------------
70 changes: 39 additions & 31 deletions controllers/vspherecluster_reconciler.go
@@ -285,38 +285,46 @@ func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capv

func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error {
vsphereCluster := clusterCtx.VSphereCluster
if identity.IsSecretIdentity(vsphereCluster) {
secret := &corev1.Secret{}
secretKey := client.ObjectKey{
Namespace: vsphereCluster.Namespace,
Name: vsphereCluster.Spec.IdentityRef.Name,
}
err := clusterCtx.Client.Get(ctx, secretKey, secret)
if err != nil {
return err
}
if !identity.IsSecretIdentity(vsphereCluster) {
return nil
}
secret := &corev1.Secret{}
secretKey := client.ObjectKey{
Namespace: vsphereCluster.Namespace,
Name: vsphereCluster.Spec.IdentityRef.Name,
}
err := clusterCtx.Client.Get(ctx, secretKey, secret)
if err != nil {
return err
}

// check if cluster is already an owner
if !clusterutilv1.IsOwnedByObject(secret, vsphereCluster) {
ownerReferences := secret.GetOwnerReferences()
if identity.IsOwnedByIdentityOrCluster(ownerReferences) {
return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name)
}
ownerReferences = append(ownerReferences, metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: vsphereCluster.Kind,
Name: vsphereCluster.Name,
UID: vsphereCluster.UID,
})
secret.SetOwnerReferences(ownerReferences)
}
if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) {
ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
}
err = r.Client.Update(ctx, secret)
if err != nil {
return err
}
// If a different VSphereCluster is an owner return an error.
if !clusterutilv1.IsOwnedByObject(secret, vsphereCluster) && identity.IsOwnedByIdentityOrCluster(secret.GetOwnerReferences()) {
return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name)
}

helper, err := patch.NewHelper(secret, clusterCtx.Client)
if err != nil {
return err
}

// Ensure the VSphereCluster is an owner and that the APIVersion is up to date.
secret.SetOwnerReferences(clusterutilv1.EnsureOwnerRef(secret.GetOwnerReferences(),
metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: vsphereCluster.Kind,
Name: vsphereCluster.Name,
UID: vsphereCluster.UID,
},
))

// Ensure the finalizer is added.
if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) {
ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
}
err = helper.Patch(ctx, secret)
if err != nil {
return err
}

return nil
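
The rewrite above replaces the old "append an ownerReference only if the cluster is not yet an owner" logic with `clusterutilv1.EnsureOwnerRef`, which matches an existing reference by API group, kind and name and updates it in place, and it writes the secret back with a patch helper instead of a full update. A minimal, illustrative sketch of that upsert behaviour (the values are examples only):

```go
// Illustrative only: EnsureOwnerRef treats group/kind/name as the identity of a
// reference, so a stale APIVersion written by an older release is replaced in
// place rather than duplicated.
refs := []metav1.OwnerReference{{
	APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha4", // stale reference
	Kind:       "VSphereCluster",
	Name:       "my-cluster",
}}

refs = clusterutilv1.EnsureOwnerRef(refs, metav1.OwnerReference{
	APIVersion: infrav1.GroupVersion.String(), // current version of the same group
	Kind:       "VSphereCluster",
	Name:       "my-cluster",
	UID:        vsphereCluster.UID,
})

// refs still has length 1; the single reference now carries the current APIVersion.
```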
46 changes: 23 additions & 23 deletions controllers/vsphereclusteridentity_controller.go
@@ -136,30 +136,30 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile.
return reconcile.Result{}, errors.Errorf("secret: %s not found in namespace: %s", secretKey.Name, secretKey.Namespace)
}

if !clusterutilv1.IsOwnedByObject(secret, identity) {
ownerReferences := secret.GetOwnerReferences()
if pkgidentity.IsOwnedByIdentityOrCluster(ownerReferences) {
conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretAlreadyInUseReason, clusterv1.ConditionSeverityError, "secret being used by another Cluster/VSphereIdentity")
identity.Status.Ready = false
return reconcile.Result{}, errors.New("secret being used by another Cluster/VSphereIdentity")
}

ownerReferences = append(ownerReferences, metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: identity.Kind,
Name: identity.Name,
UID: identity.UID,
})
secret.SetOwnerReferences(ownerReferences)
// If this secret is owned by a different VSphereClusterIdentity or a VSphereCluster, mark the identity as not ready and return an error.
if !clusterutilv1.IsOwnedByObject(secret, identity) && pkgidentity.IsOwnedByIdentityOrCluster(secret.GetOwnerReferences()) {
conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretAlreadyInUseReason, clusterv1.ConditionSeverityError, "secret being used by another Cluster/VSphereIdentity")
identity.Status.Ready = false
return reconcile.Result{}, errors.New("secret being used by another Cluster/VSphereIdentity")
}

if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) {
ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
}
err = r.Client.Update(ctx, secret)
if err != nil {
conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretOwnerReferenceFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
return reconcile.Result{}, err
}
// Ensure the VSphereClusterIdentity is set as the owner of the secret, and that the reference has an up to date APIVersion.
secret.SetOwnerReferences(
clusterutilv1.EnsureOwnerRef(secret.GetOwnerReferences(),
metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: identity.Kind,
Name: identity.Name,
UID: identity.UID,
}))

if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) {
ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
}
err = r.Client.Update(ctx, secret)
if err != nil {
conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretOwnerReferenceFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
return reconcile.Result{}, err
}

conditions.MarkTrue(identity, infrav1.CredentialsAvailableCondidtion)
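
The identity controller applies the same pattern: reject the secret if a different VSphereCluster or VSphereClusterIdentity already owns it, then use `EnsureOwnerRef` so the identity's reference is added or its APIVersion refreshed. For orientation, here is a rough sketch of what a guard like `pkgidentity.IsOwnedByIdentityOrCluster` amounts to; the real implementation lives in `pkg/identity` and may differ:

```go
// Sketch under the assumption that the guard matches owner kinds in the
// infrastructure API group; see pkg/identity/identity.go for the real code.
// Requires: k8s.io/apimachinery/pkg/runtime/schema
func isOwnedByIdentityOrCluster(refs []metav1.OwnerReference) bool {
	for _, ref := range refs {
		gv, err := schema.ParseGroupVersion(ref.APIVersion)
		if err != nil {
			continue
		}
		if gv.Group == infrav1.GroupVersion.Group &&
			(ref.Kind == "VSphereCluster" || ref.Kind == "VSphereClusterIdentity") {
			return true
		}
	}
	return false
}
```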
20 changes: 1 addition & 19 deletions controllers/vspheredeploymentzone_controller.go
@@ -182,26 +182,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileNormal(deploymentZoneCtx *capv
deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false)
return errors.Wrapf(err, "failed to reconcile failure domain")
}
conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition)

// Ensure the VSphereDeploymentZone is marked as an owner of the VSphereFailureDomain.
if !clusterutilv1.HasOwnerRef(deploymentZoneCtx.VSphereFailureDomain.GetOwnerReferences(), metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: "VSphereDeploymentZone",
Name: deploymentZoneCtx.VSphereDeploymentZone.Name,
}) {
if err := updateOwnerReferences(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain, r.Client, func() []metav1.OwnerReference {
return append(deploymentZoneCtx.VSphereFailureDomain.OwnerReferences, metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: deploymentZoneCtx.VSphereDeploymentZone.Kind,
Name: deploymentZoneCtx.VSphereDeploymentZone.Name,
UID: deploymentZoneCtx.VSphereDeploymentZone.UID,
})
}); err != nil {
return err
}
}

// Mark the deployment zone as ready.
deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(true)
return nil
}
20 changes: 20 additions & 0 deletions controllers/vspheredeploymentzone_controller_domain.go
@@ -18,8 +18,10 @@ package controllers

import (
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterutilv1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
ctrl "sigs.k8s.io/controller-runtime"

@@ -58,6 +60,24 @@ func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(deploymentZoneCt
logger.Error(err, "topology is not configured correctly")
return errors.Wrap(err, "topology is not configured correctly")
}

// Ensure the VSphereDeploymentZone is marked as an owner of the VSphereFailureDomain.
if err := updateOwnerReferences(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain, r.Client,
func() []metav1.OwnerReference {
return clusterutilv1.EnsureOwnerRef(
deploymentZoneCtx.VSphereFailureDomain.OwnerReferences,
metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: deploymentZoneCtx.VSphereDeploymentZone.Kind,
Name: deploymentZoneCtx.VSphereDeploymentZone.Name,
UID: deploymentZoneCtx.VSphereDeploymentZone.UID,
})
}); err != nil {
return err
}

// Mark the VSphereDeploymentZone as having a valid VSphereFailureDomain.
conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition)
return nil
}

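
With this change the ownerReference on the VSphereFailureDomain is managed in `reconcileFailureDomain`, immediately before the validated condition is set, and it also goes through `EnsureOwnerRef`. The `updateOwnerReferences` helper is defined elsewhere in this package; one plausible shape for such a helper, sketched here as an assumption rather than the actual implementation, is a patch that swaps in the references produced by the callback:

```go
// Assumed shape only: patch the object so that just its ownerReferences change,
// leaving the rest of the object untouched.
func updateOwnerReferencesSketch(ctx context.Context, obj client.Object, c client.Client,
	newRefs func() []metav1.OwnerReference) error {
	patchHelper, err := patch.NewHelper(obj, c) // sigs.k8s.io/cluster-api/util/patch
	if err != nil {
		return err
	}
	obj.SetOwnerReferences(newRefs())
	return patchHelper.Patch(ctx, obj)
}
```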
2 changes: 1 addition & 1 deletion controllers/vspheremachine_controller.go
@@ -263,7 +263,7 @@ func (r *machineReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ct
}

func (r *machineReconciler) reconcileDelete(machineCtx capvcontext.MachineContext) (reconcile.Result, error) {
machineCtx.GetLogger().Info("Handling deleted SphereMachine")
machineCtx.GetLogger().Info("Handling deleted VSphereMachine")
conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")

if err := r.VMService.ReconcileDelete(machineCtx); err != nil {
2 changes: 1 addition & 1 deletion pkg/identity/identity.go
@@ -122,11 +122,11 @@ func validateInputs(c client.Client, cluster *infrav1.VSphereCluster) error {
return nil
}

// IsSecretIdentity returns true if the VSphereCluster identity is a Secret.
func IsSecretIdentity(cluster *infrav1.VSphereCluster) bool {
if cluster == nil || cluster.Spec.IdentityRef == nil {
return false
}

return cluster.Spec.IdentityRef.Kind == infrav1.SecretKind
}

42 changes: 21 additions & 21 deletions test/e2e/README.md
@@ -14,36 +14,36 @@ In order to run the e2e tests the following requirements must be met:
* The testing must occur on a host that can access the VMs deployed to vSphere via the network
* Ginkgo ([download](https://onsi.github.io/ginkgo/#getting-ginkgo))
* Docker ([download](https://www.docker.com/get-started))
* Kind v0.7.0+ ([download](https://kind.sigs.k8s.io))
* Kind v0.20.0+ ([download](https://kind.sigs.k8s.io))

### Environment variables

The first step to running the e2e tests is setting up the required environment variables:

| Environment variable | Description | Example |
| ----------------------------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- |
| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` |
| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` |
| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` |
| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` |
| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` |
| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` |
| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datastore/my-datastore` |
| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` |
| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` |
| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` |
| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` |
| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` |
| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` |
| Environment variable | Description | Example |
|------------------------------|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------|
| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` |
| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` |
| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` |
| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` |
| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` |
| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` |
| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datastore/my-datastore` |
| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` |
| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` |
| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` |
| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` |
| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` |
| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` |

### Flags

| Flag | Description | Default Value |
|-------------------------|----------------------------------------------------------------------------------------------------------|-----------|
| `SKIP_RESOURCE_CLEANUP` | This flag skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster. | `false` |
| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` |
| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` |
| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` |
|-------------------------|----------------------------------------------------------------------------------------------------------|---------------|
| `SKIP_RESOURCE_CLEANUP` | This flag skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster. | `false` |
| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` |
| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` |
| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` |

### Running the e2e tests

34 changes: 0 additions & 34 deletions test/e2e/capv_quick_start_test.go

This file was deleted.
