diff --git a/test/e2e/quick_start_test.go b/test/e2e/quick_start_test.go
index 5d918afd56c9..960fad0a061a 100644
--- a/test/e2e/quick_start_test.go
+++ b/test/e2e/quick_start_test.go
@@ -40,6 +40,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 			InfrastructureProvider: ptr.To("docker"),
 			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
 				// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
+				By("Checking that owner references are resilient")
 				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
@@ -49,6 +50,7 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that owner references are correctly updated to the correct apiVersion.
+				By("Checking that owner references are updated to the correct API version")
 				framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
@@ -58,14 +60,16 @@ var _ = Describe("When following the Cluster API quick-start", func() {
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed.
+				By("Checking that finalizers are resilient")
 				framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
-					framework.CoreFinalizersAssertion,
+					framework.CoreFinalizersAssertionWithLegacyClusters,
 					framework.KubeadmControlPlaneFinalizersAssertion,
 					framework.ExpFinalizersAssertion,
 					framework.DockerInfraFinalizersAssertion,
 				)
 				// This check ensures that the resourceVersions are stable, i.e. it verifies there are no
 				// continuous reconciles when everything should be stable.
+				By("Checking that resourceVersions are stable")
 				framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName))
 			},
 		}
@@ -82,8 +86,9 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
 			SkipCleanup:            skipCleanup,
 			Flavor:                 ptr.To("topology"),
 			InfrastructureProvider: ptr.To("docker"),
-			// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
 			PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) {
+				// This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed.
+				By("Checking that owner references are resilient")
 				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName),
 					framework.CoreOwnerReferenceAssertion,
 					framework.ExpOwnerReferenceAssertions,
@@ -93,6 +98,7 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [
 					framework.KubernetesReferenceAssertions,
 				)
 				// This check ensures that owner references are correctly updated to the correct apiVersion.
+ By("Checking that owner references are updated to the correct API version") framework.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName), framework.CoreOwnerReferenceAssertion, framework.ExpOwnerReferenceAssertions, @@ -102,14 +108,16 @@ var _ = Describe("When following the Cluster API quick-start with ClusterClass [ framework.KubernetesReferenceAssertions, ) // This check ensures that finalizers are resilient - i.e. correctly re-reconciled - when removed. + By("Checking that finalizers are resilient") framework.ValidateFinalizersResilience(ctx, proxy, namespace, clusterName, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName), - framework.CoreFinalizersAssertion, + framework.CoreFinalizersAssertionWithClassyClusters, framework.KubeadmControlPlaneFinalizersAssertion, framework.ExpFinalizersAssertion, framework.DockerInfraFinalizersAssertion, ) // This check ensures that the resourceVersions are stable, i.e. it verifies there are no // continuous reconciles when everything should be stable. + By("Checking that resourceVersions are stable") framework.ValidateResourceVersionStable(ctx, proxy, namespace, clusterctlcluster.FilterClusterObjectsWithNameFilter(clusterName)) }, } diff --git a/test/framework/finalizers_helpers.go b/test/framework/finalizers_helpers.go index 568853364ca3..336023fae164 100644 --- a/test/framework/finalizers_helpers.go +++ b/test/framework/finalizers_helpers.go @@ -20,10 +20,12 @@ import ( "context" "fmt" "reflect" + "strings" "time" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,34 +39,43 @@ import ( "sigs.k8s.io/cluster-api/util/patch" ) -// CoreFinalizersAssertion maps Cluster API core types to their expected finalizers. -var CoreFinalizersAssertion = map[string][]string{ - "Cluster": {clusterv1.ClusterFinalizer}, - "Machine": {clusterv1.MachineFinalizer}, - "MachineSet": {clusterv1.MachineSetTopologyFinalizer}, - "MachineDeployment": {clusterv1.MachineDeploymentTopologyFinalizer}, +// CoreFinalizersAssertionWithLegacyClusters maps Cluster API core types to their expected finalizers for legacy Clusters. +var CoreFinalizersAssertionWithLegacyClusters = map[string]func(types.NamespacedName) []string{ + clusterKind: func(_ types.NamespacedName) []string { return []string{clusterv1.ClusterFinalizer} }, + machineKind: func(_ types.NamespacedName) []string { return []string{clusterv1.MachineFinalizer} }, } +// CoreFinalizersAssertionWithClassyClusters maps Cluster API core types to their expected finalizers for classy Clusters. +var CoreFinalizersAssertionWithClassyClusters = func() map[string]func(types.NamespacedName) []string { + r := map[string]func(types.NamespacedName) []string{} + for k, v := range CoreFinalizersAssertionWithLegacyClusters { + r[k] = v + } + r[machineSetKind] = func(_ types.NamespacedName) []string { return []string{clusterv1.MachineSetTopologyFinalizer} } + r[machineDeploymentKind] = func(_ types.NamespacedName) []string { return []string{clusterv1.MachineDeploymentTopologyFinalizer} } + return r +}() + // ExpFinalizersAssertion maps experimental resource types to their expected finalizers. 
-var ExpFinalizersAssertion = map[string][]string{
-	"ClusterResourceSet": {addonsv1.ClusterResourceSetFinalizer},
-	"MachinePool":        {expv1.MachinePoolFinalizer},
+var ExpFinalizersAssertion = map[string]func(types.NamespacedName) []string{
+	clusterResourceSetKind: func(_ types.NamespacedName) []string { return []string{addonsv1.ClusterResourceSetFinalizer} },
+	machinePoolKind:        func(_ types.NamespacedName) []string { return []string{expv1.MachinePoolFinalizer} },
 }
 
 // DockerInfraFinalizersAssertion maps docker infrastructure resource types to their expected finalizers.
-var DockerInfraFinalizersAssertion = map[string][]string{
-	"DockerMachine":     {infrav1.MachineFinalizer},
-	"DockerCluster":     {infrav1.ClusterFinalizer},
-	"DockerMachinePool": {infraexpv1.MachinePoolFinalizer},
+var DockerInfraFinalizersAssertion = map[string]func(types.NamespacedName) []string{
+	dockerMachineKind:     func(_ types.NamespacedName) []string { return []string{infrav1.MachineFinalizer} },
+	dockerClusterKind:     func(_ types.NamespacedName) []string { return []string{infrav1.ClusterFinalizer} },
+	dockerMachinePoolKind: func(_ types.NamespacedName) []string { return []string{infraexpv1.MachinePoolFinalizer} },
 }
 
 // KubeadmControlPlaneFinalizersAssertion maps Kubeadm resource types to their expected finalizers.
-var KubeadmControlPlaneFinalizersAssertion = map[string][]string{
-	"KubeadmControlPlane": {controlplanev1.KubeadmControlPlaneFinalizer},
+var KubeadmControlPlaneFinalizersAssertion = map[string]func(types.NamespacedName) []string{
+	kubeadmControlPlaneKind: func(_ types.NamespacedName) []string { return []string{controlplanev1.KubeadmControlPlaneFinalizer} },
 }
 
 // ValidateFinalizersResilience checks that expected finalizers are in place, deletes them, and verifies that expected finalizers are properly added again.
-func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, finalizerAssertions ...map[string][]string) {
+func ValidateFinalizersResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, finalizerAssertions ...map[string]func(name types.NamespacedName) []string) {
 	clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName}
 
 	allFinalizerAssertions, err := concatenateFinalizerAssertions(finalizerAssertions...)
 	Expect(err).ToNot(HaveOccurred())
@@ -107,7 +118,7 @@ func removeFinalizers(ctx context.Context, proxy ClusterProxy, namespace string,
 	}
 }
 
-func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string][]string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) map[string]*unstructured.Unstructured {
+func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace string, allFinalizerAssertions map[string]func(name types.NamespacedName) []string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) map[string]*unstructured.Unstructured {
 	graph, err := clusterctlcluster.GetOwnerGraph(ctx, namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction)
 	Expect(err).ToNot(HaveOccurred())
 
@@ -121,11 +132,15 @@ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace
 		err = proxy.GetClient().Get(ctx, nodeNamespacedName, obj)
 		Expect(err).ToNot(HaveOccurred())
 
+		// Assert that the expected finalizers are set on the resource (this also catches unexpected finalizers).
 		setFinalizers := obj.GetFinalizers()
+		var expectedFinalizers []string
+		if assertion, ok := allFinalizerAssertions[node.Object.Kind]; ok {
+			expectedFinalizers = assertion(types.NamespacedName{Namespace: node.Object.Namespace, Name: node.Object.Name})
+		}
+		Expect(setFinalizers).To(Equal(expectedFinalizers), "for resource type %s", node.Object.Kind)
 		if len(setFinalizers) > 0 {
-			// assert if the expected finalizers are set on the resource
-			Expect(setFinalizers).To(Equal(allFinalizerAssertions[node.Object.Kind]), "for resource type %s", node.Object.Kind)
 			objsWithFinalizers[fmt.Sprintf("%s/%s/%s", node.Object.Kind, node.Object.Namespace, node.Object.Name)] = obj
 		}
 	}
@@ -134,24 +149,27 @@ func getObjectsWithFinalizers(ctx context.Context, proxy ClusterProxy, namespace
 }
 
 // assertFinalizersExist ensures that current Finalizers match those in the initialObjectsWithFinalizers.
-func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string][]string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) {
+func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace string, initialObjsWithFinalizers map[string]*unstructured.Unstructured, allFinalizerAssertions map[string]func(name types.NamespacedName) []string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction) {
 	Eventually(func() error {
 		var allErrs []error
 		finalObjsWithFinalizers := getObjectsWithFinalizers(ctx, proxy, namespace, allFinalizerAssertions, ownerGraphFilterFunction)
 
+		// Check if all the initial objects with finalizers have them back.
 		for objKindNamespacedName, obj := range initialObjsWithFinalizers {
 			// verify if finalizers for this resource were set on reconcile
 			if _, valid := finalObjsWithFinalizers[objKindNamespacedName]; !valid {
-				allErrs = append(allErrs, fmt.Errorf("no finalizers set for %s",
-					objKindNamespacedName))
+				allErrs = append(allErrs, fmt.Errorf("no finalizers set for %s, at the beginning of the test it had %s",
+					objKindNamespacedName, obj.GetFinalizers()))
 				continue
 			}
 
 			// verify if this resource has the appropriate Finalizers set
-			expectedFinalizers, assert := allFinalizerAssertions[obj.GetKind()]
-			if !assert {
-				continue
-			}
+			expectedFinalizersF, assert := allFinalizerAssertions[obj.GetKind()]
+			// NOTE: this case should never happen because all the initialObjsWithFinalizers have already been checked
+			// against a finalizer assertion.
+			Expect(assert).To(BeTrue(), "finalizer assertions for %s are missing", objKindNamespacedName)
+			parts := strings.Split(objKindNamespacedName, "/")
+			expectedFinalizers := expectedFinalizersF(types.NamespacedName{Namespace: parts[1], Name: parts[2]})
 
 			setFinalizers := finalObjsWithFinalizers[objKindNamespacedName].GetFinalizers()
 			if !reflect.DeepEqual(expectedFinalizers, setFinalizers) {
@@ -160,21 +178,28 @@ func assertFinalizersExist(ctx context.Context, proxy ClusterProxy, namespace st
 			}
 		}
 
+		// Check if there are objects with finalizers that did not exist initially.
+		for objKindNamespacedName, obj := range finalObjsWithFinalizers {
+			// verify if this resource picked up finalizers it did not have at the beginning of the test
+			if _, valid := initialObjsWithFinalizers[objKindNamespacedName]; !valid {
+				allErrs = append(allErrs, fmt.Errorf("%s has finalizers that did not exist at the beginning of the test: %s",
+					objKindNamespacedName, obj.GetFinalizers()))
+			}
+		}
+
 		return kerrors.NewAggregate(allErrs)
 	}).WithTimeout(1 * time.Minute).WithPolling(2 * time.Second).Should(Succeed())
 }
 
 // concatenateFinalizerAssertions concatenates all finalizer assertions into one map. It reports errors if assertions already exist.
-func concatenateFinalizerAssertions(finalizerAssertions ...map[string][]string) (map[string][]string, error) {
+func concatenateFinalizerAssertions(finalizerAssertions ...map[string]func(name types.NamespacedName) []string) (map[string]func(name types.NamespacedName) []string, error) {
 	var allErrs []error
-	allFinalizerAssertions := make(map[string][]string, 0)
+	allFinalizerAssertions := make(map[string]func(name types.NamespacedName) []string, 0)
 
 	for i := range finalizerAssertions {
 		for kind, finalizers := range finalizerAssertions[i] {
 			if _, alreadyExists := allFinalizerAssertions[kind]; alreadyExists {
-				allErrs = append(allErrs, fmt.Errorf("finalizer assertion cannot be applied as it already exists for kind: %s, existing value: %v, new value: %v",
-					kind, allFinalizerAssertions[kind], finalizers))
-
+				allErrs = append(allErrs, fmt.Errorf("finalizer assertion cannot be applied as it already exists for kind: %s", kind))
 				continue
 			}
diff --git a/test/framework/ownerreference_helpers.go b/test/framework/ownerreference_helpers.go
index 8f68f2d2d28f..1160c2dcf737 100644
--- a/test/framework/ownerreference_helpers.go
+++ b/test/framework/ownerreference_helpers.go
@@ -45,7 +45,7 @@ import (
 )
 
 // ValidateOwnerReferencesOnUpdate checks that expected owner references are updated to the correct apiVersion.
-func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) {
+func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(obj types.NamespacedName, reference []metav1.OwnerReference) error) {
 	clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName}
 
 	// Pause the cluster.
@@ -70,7 +70,7 @@ func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy ClusterProxy, na
 }
 
 // ValidateOwnerReferencesResilience checks that expected owner references are in place, deletes them, and verifies that expect owner references are properly rebuilt.
-func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) {
+func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy, namespace, clusterName string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(obj types.NamespacedName, reference []metav1.OwnerReference) error) {
 	// Check that the ownerReferences are as expected on the first iteration.
 	AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction, assertFuncs...)
 
@@ -101,8 +101,8 @@ func ValidateOwnerReferencesResilience(ctx context.Context, proxy ClusterProxy,
 	AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), ownerGraphFilterFunction, assertFuncs...)
 }
 
-func AssertOwnerReferences(namespace, kubeconfigPath string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) {
-	allAssertFuncs := map[string][]func(reference []metav1.OwnerReference) error{}
+func AssertOwnerReferences(namespace, kubeconfigPath string, ownerGraphFilterFunction clusterctlcluster.GetOwnerGraphFilterFunction, assertFuncs ...map[string]func(obj types.NamespacedName, reference []metav1.OwnerReference) error) {
+	allAssertFuncs := map[string][]func(obj types.NamespacedName, reference []metav1.OwnerReference) error{}
 	for _, m := range assertFuncs {
 		for k, v := range m {
 			allAssertFuncs[k] = append(allAssertFuncs[k], v)
@@ -126,7 +126,7 @@ func AssertOwnerReferences(namespace, kubeconfigPath string, ownerGraphFilterFun
 			continue
 		}
 		for _, f := range allAssertFuncs[v.Object.Kind] {
-			if err := f(v.Owners); err != nil {
+			if err := f(types.NamespacedName{Namespace: v.Object.Namespace, Name: v.Object.Name}, v.Owners); err != nil {
 				allErrs = append(allErrs, errors.Wrapf(err, "Unexpected ownerReferences for %s/%s", v.Object.Kind, v.Object.Name))
 			}
 		}
@@ -159,32 +159,32 @@ var (
 // OwnerReferences aren't as expected.
 // Note: These relationships are documented in https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/book/src/reference/owner_references.md.
 // That document should be updated if these references change.
-var CoreOwnerReferenceAssertion = map[string]func([]metav1.OwnerReference) error{
-	extensionConfigKind: func(owners []metav1.OwnerReference) error {
+var CoreOwnerReferenceAssertion = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
+	extensionConfigKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// ExtensionConfig should have no owners.
 		return HasExactOwners(owners)
 	},
-	clusterClassKind: func(owners []metav1.OwnerReference) error {
+	clusterClassKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// ClusterClass doesn't have ownerReferences (it is a clusterctl move-hierarchy root).
 		return HasExactOwners(owners)
 	},
-	clusterKind: func(owners []metav1.OwnerReference) error {
+	clusterKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// Cluster doesn't have ownerReferences (it is a clusterctl move-hierarchy root).
 		return HasExactOwners(owners)
 	},
-	machineDeploymentKind: func(owners []metav1.OwnerReference) error {
+	machineDeploymentKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// MachineDeployments must be owned by a Cluster.
 		return HasExactOwners(owners, clusterOwner)
 	},
-	machineSetKind: func(owners []metav1.OwnerReference) error {
+	machineSetKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// MachineSets must be owned and controlled by a MachineDeployment.
 		return HasExactOwners(owners, machineDeploymentController)
 	},
-	machineKind: func(owners []metav1.OwnerReference) error {
+	machineKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// Machines must be owned and controlled by a MachineSet, MachinePool, or a KubeadmControlPlane, depending on if this Machine is part of a Machine Deployment, MachinePool, or ControlPlane.
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{machineSetController}, []metav1.OwnerReference{machinePoolController}, []metav1.OwnerReference{kubeadmControlPlaneController})
 	},
-	machineHealthCheckKind: func(owners []metav1.OwnerReference) error {
+	machineHealthCheckKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// MachineHealthChecks must be owned by the Cluster.
 		return HasExactOwners(owners, clusterOwner)
 	},
@@ -205,17 +205,17 @@ var (
 // aren't as expected.
 // Note: These relationships are documented in https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/book/src/reference/owner_references.md.
 // That document should be updated if these references change.
-var ExpOwnerReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
-	clusterResourceSetKind: func(owners []metav1.OwnerReference) error {
+var ExpOwnerReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
+	clusterResourceSetKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// ClusterResourcesSet doesn't have ownerReferences (it is a clusterctl move-hierarchy root).
 		return HasExactOwners(owners)
 	},
 	// ClusterResourcesSetBinding has ClusterResourceSet set as owners on creation.
-	clusterResourceSetBindingKind: func(owners []metav1.OwnerReference) error {
+	clusterResourceSetBindingKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterResourceSetOwner}, []metav1.OwnerReference{clusterResourceSetOwner, clusterResourceSetOwner})
 	},
 	// MachinePool must be owned by a Cluster.
-	machinePoolKind: func(owners []metav1.OwnerReference) error {
+	machinePoolKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// MachinePools must be owned by a Cluster.
 		return HasExactOwners(owners, clusterOwner)
 	},
@@ -230,12 +230,12 @@ var (
 // aren't as expected.
 // Note: These relationships are documented in https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/book/src/reference/owner_references.md.
 // That document should be updated if these references change.
-var KubernetesReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
-	secretKind: func(owners []metav1.OwnerReference) error {
+var KubernetesReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
+	secretKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// Secrets for cluster certificates must be owned and controlled by the KubeadmControlPlane. The bootstrap secret should be owned and controlled by a KubeadmControlPlane.
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{kubeadmControlPlaneController}, []metav1.OwnerReference{kubeadmConfigController})
 	},
-	configMapKind: func(owners []metav1.OwnerReference) error {
+	configMapKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// The only configMaps considered here are those owned by a ClusterResourceSet.
 		return HasExactOwners(owners, clusterResourceSetOwner)
 	},
@@ -255,12 +255,12 @@ var (
 // OwnerReferences aren't as expected.
 // Note: These relationships are documented in https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/book/src/reference/owner_references.md.
 // That document should be updated if these references change.
-var KubeadmControlPlaneOwnerReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
-	kubeadmControlPlaneKind: func(owners []metav1.OwnerReference) error {
+var KubeadmControlPlaneOwnerReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
+	kubeadmControlPlaneKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// The KubeadmControlPlane must be owned and controlled by a Cluster.
 		return HasExactOwners(owners, clusterController)
 	},
-	kubeadmControlPlaneTemplateKind: func(owners []metav1.OwnerReference) error {
+	kubeadmControlPlaneTemplateKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// The KubeadmControlPlaneTemplate must be owned by a ClusterClass.
 		return HasExactOwners(owners, clusterClassOwner)
 	},
@@ -279,12 +279,12 @@ var (
 // aren't as expected.
 // Note: These relationships are documented in https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/book/src/reference/owner_references.md.
 // That document should be updated if these references change.
-var KubeadmBootstrapOwnerReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
-	kubeadmConfigKind: func(owners []metav1.OwnerReference) error {
+var KubeadmBootstrapOwnerReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
+	kubeadmConfigKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// The KubeadmConfig must be owned and controlled by a Machine or MachinePool.
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{machineController}, []metav1.OwnerReference{machinePoolController, clusterOwner})
 	},
-	kubeadmConfigTemplateKind: func(owners []metav1.OwnerReference) error {
+	kubeadmConfigTemplateKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// The KubeadmConfigTemplate must be owned by a ClusterClass.
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterOwner}, []metav1.OwnerReference{clusterClassOwner})
 	},
@@ -306,29 +306,29 @@ var (
 // OwnerReferences aren't as expected.
 // Note: These relationships are documented in https://github.com/kubernetes-sigs/cluster-api/tree/main/docs/book/src/reference/owner_references.md.
 // That document should be updated if these references change.
-var DockerInfraOwnerReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
-	dockerMachineKind: func(owners []metav1.OwnerReference) error {
+var DockerInfraOwnerReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
+	dockerMachineKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// The DockerMachine must be owned and controlled by a Machine or a DockerMachinePool.
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{machineController}, []metav1.OwnerReference{machineController, dockerMachinePoolController})
 	},
-	dockerMachineTemplateKind: func(owners []metav1.OwnerReference) error {
+	dockerMachineTemplateKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// Base DockerMachineTemplates referenced in a ClusterClass must be owned by the ClusterClass.
 		// DockerMachineTemplates created for specific Clusters in the Topology controller must be owned by a Cluster.
 		return HasOneOfExactOwners(owners, []metav1.OwnerReference{clusterOwner}, []metav1.OwnerReference{clusterClassOwner})
 	},
-	dockerClusterKind: func(owners []metav1.OwnerReference) error {
+	dockerClusterKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// DockerCluster must be owned and controlled by a Cluster.
 		return HasExactOwners(owners, clusterController)
 	},
-	dockerClusterTemplateKind: func(owners []metav1.OwnerReference) error {
+	dockerClusterTemplateKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// DockerClusterTemplate must be owned by a ClusterClass.
 		return HasExactOwners(owners, clusterClassOwner)
 	},
-	dockerMachinePoolKind: func(owners []metav1.OwnerReference) error {
+	dockerMachinePoolKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// DockerMachinePool must be owned and controlled by a MachinePool.
 		return HasExactOwners(owners, machinePoolController, clusterOwner)
 	},
-	dockerMachinePoolTemplateKind: func(owners []metav1.OwnerReference) error {
+	dockerMachinePoolTemplateKind: func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
 		// DockerMachinePoolTemplate must be owned by a ClusterClass.
 		return HasExactOwners(owners, clusterClassOwner)
 	},
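
After this change, finalizer assertions are per-object functions and owner-reference assertion functions receive the object's NamespacedName alongside its ownerReferences, so any extra assertion maps passed into the framework helpers have to use the new shapes. A minimal sketch of what that looks like for a downstream E2E suite follows; the "MyInfraCluster" kind, its finalizer string, and the simplified owner check are hypothetical placeholders, not part of this patch.

package e2e

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// Finalizer assertions now return the expected finalizers per object, so the
// expectation can vary with the object's namespace/name if needed.
var myInfraFinalizersAssertion = map[string]func(types.NamespacedName) []string{
	"MyInfraCluster": func(_ types.NamespacedName) []string {
		return []string{"myinfracluster.infrastructure.cluster.x-k8s.io"}
	},
}

// Owner-reference assertions now receive the object's NamespacedName alongside
// its ownerReferences. This simplified check stands in for the framework's
// HasExactOwners helper.
var myInfraOwnerReferenceAssertions = map[string]func(types.NamespacedName, []metav1.OwnerReference) error{
	"MyInfraCluster": func(_ types.NamespacedName, owners []metav1.OwnerReference) error {
		if len(owners) != 1 || owners[0].Kind != "Cluster" {
			return fmt.Errorf("expected a single Cluster owner, got %v", owners)
		}
		return nil
	},
}

// Both maps can be appended to the variadic assertion arguments of
// framework.ValidateFinalizersResilience and framework.ValidateOwnerReferencesResilience.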