diff --git a/pkg/cache/cluster.go b/pkg/cache/cluster.go
index 96f9ebe72..d8f0a5ddd 100644
--- a/pkg/cache/cluster.go
+++ b/pkg/cache/cluster.go
@@ -120,6 +120,9 @@ type ClusterCache interface {
 	// IterateHierarchy iterates resource tree starting from the specified top level resource and executes callback for each resource in the tree.
 	// The action callback returns true if iteration should continue and false otherwise.
 	IterateHierarchy(key kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool)
+	// IterateHierarchyV2 iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree.
+	// The action callback returns true if iteration should continue and false otherwise.
+	IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool)
 	// IsNamespaced answers if specified group/kind is a namespaced resource API or not
 	IsNamespaced(gk schema.GroupKind) (bool, error)
 	// GetManagedLiveObjs helps finding matching live K8S resources for a given resources list.
@@ -1004,6 +1007,107 @@ func (c *clusterCache) IterateHierarchy(key kube.ResourceKey, action func(resour
 	}
 }
 
+// IterateHierarchyV2 iterates resource tree starting from the specified top level resources and executes callback for each resource in the tree.
+func (c *clusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(resource *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	keysPerNamespace := make(map[string][]kube.ResourceKey)
+	for _, key := range keys {
+		_, ok := c.resources[key]
+		if !ok {
+			continue
+		}
+		keysPerNamespace[key.Namespace] = append(keysPerNamespace[key.Namespace], key)
+	}
+	for namespace, namespaceKeys := range keysPerNamespace {
+		nsNodes := c.nsIndex[namespace]
+		graph := buildGraph(nsNodes)
+		visited := make(map[kube.ResourceKey]int)
+		for _, key := range namespaceKeys {
+			visited[key] = 0
+		}
+		for _, key := range namespaceKeys {
+			// The check for existence of key is done above.
+			res := c.resources[key]
+			if visited[key] == 2 || !action(res, nsNodes) {
+				continue
+			}
+			visited[key] = 1
+			if _, ok := graph[key]; ok {
+				for _, child := range graph[key] {
+					if visited[child.ResourceKey()] == 0 && action(child, nsNodes) {
+						child.iterateChildrenV2(graph, nsNodes, visited, func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool {
+							if err != nil {
+								c.log.V(2).Info(err.Error())
+								return false
+							}
+							return action(child, namespaceResources)
+						})
+					}
+				}
+			}
+			visited[key] = 2
+		}
+	}
+}
+
+func buildGraph(nsNodes map[kube.ResourceKey]*Resource) map[kube.ResourceKey]map[types.UID]*Resource {
+	// Prepare to construct a graph
+	nodesByUID := make(map[types.UID][]*Resource, len(nsNodes))
+	for _, node := range nsNodes {
+		nodesByUID[node.Ref.UID] = append(nodesByUID[node.Ref.UID], node)
+	}
+
+	// In graph, the key is the parent and the value is a map of children keyed by UID.
+	graph := make(map[kube.ResourceKey]map[types.UID]*Resource)
+
+	// Loop through all nodes, calling each one "childNode," because we're only bothering with it if it has a parent.
+	for _, childNode := range nsNodes {
+		for i, ownerRef := range childNode.OwnerRefs {
+			// First, backfill the owner reference's UID if it is missing by looking the owner up among the namespace's nodes.
+			if ownerRef.UID == "" {
+				group, err := schema.ParseGroupVersion(ownerRef.APIVersion)
+				if err != nil {
+					// APIVersion is invalid, so we couldn't find the parent.
+					continue
+				}
+				graphKeyNode, ok := nsNodes[kube.ResourceKey{Group: group.Group, Kind: ownerRef.Kind, Namespace: childNode.Ref.Namespace, Name: ownerRef.Name}]
+				if ok {
+					ownerRef.UID = graphKeyNode.Ref.UID
+					childNode.OwnerRefs[i] = ownerRef
+				} else {
+					// No resource found with the given graph key, so move on.
+					continue
+				}
+			}
+
+			// Now that we have the UID of the parent, update the graph.
+			uidNodes, ok := nodesByUID[ownerRef.UID]
+			if ok {
+				for _, uidNode := range uidNodes {
+					// Update the graph for this owner to include the child.
+					if _, ok := graph[uidNode.ResourceKey()]; !ok {
+						graph[uidNode.ResourceKey()] = make(map[types.UID]*Resource)
+					}
+					r, ok := graph[uidNode.ResourceKey()][childNode.Ref.UID]
+					if !ok {
+						graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode
+					} else if r != nil {
+						// The object might have multiple children with the same UID (e.g. replicaset from apps and extensions group).
+						// It is ok to pick any object, but we need to make sure we pick the same child after every refresh.
+						key1 := r.ResourceKey()
+						key2 := childNode.ResourceKey()
+						if strings.Compare(key1.String(), key2.String()) > 0 {
+							graph[uidNode.ResourceKey()][childNode.Ref.UID] = childNode
+						}
+					}
+				}
+			}
+		}
+	}
+	return graph
+}
+
 // IsNamespaced answers if specified group/kind is a namespaced resource API or not
 func (c *clusterCache) IsNamespaced(gk schema.GroupKind) (bool, error) {
 	if isNamespaced, ok := c.namespacedResources[gk]; ok {
diff --git a/pkg/cache/cluster_test.go b/pkg/cache/cluster_test.go
index f5e61a065..26815c07e 100644
--- a/pkg/cache/cluster_test.go
+++ b/pkg/cache/cluster_test.go
@@ -12,6 +12,7 @@ import (
 	"golang.org/x/sync/semaphore"
 	"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
 
+	"github.com/google/uuid"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
@@ -73,7 +74,7 @@ var (
 - hostname: localhost`, testCreationTime.UTC().Format(time.RFC3339)))
 )
 
-func newCluster(t *testing.T, objs ...runtime.Object) *clusterCache {
+func newCluster(t testing.TB, objs ...runtime.Object) *clusterCache {
 	cache := newClusterWithOptions(t, []UpdateSettingsFunc{}, objs...)
 
 	t.Cleanup(func() {
@@ -83,7 +84,7 @@ func newCluster(t *testing.T, objs ...runtime.Object) *clusterCache {
 	return cache
 }
 
-func newClusterWithOptions(t *testing.T, opts []UpdateSettingsFunc, objs ...runtime.Object) *clusterCache {
+func newClusterWithOptions(t testing.TB, opts []UpdateSettingsFunc, objs ...runtime.Object) *clusterCache {
 	client := fake.NewSimpleDynamicClient(scheme.Scheme, objs...)
 	reactor := client.ReactionChain[0]
 	client.PrependReactor("list", "*", func(action testcore.Action) (handled bool, ret runtime.Object, err error) {
@@ -112,6 +113,10 @@ func newClusterWithOptions(t *testing.T, opts []UpdateSettingsFunc, objs ...runt
 		GroupKind: schema.GroupKind{Group: "apps", Kind: "StatefulSet"},
 		GroupVersionResource: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"},
 		Meta: metav1.APIResource{Namespaced: true},
+	}, {
+		GroupKind: schema.GroupKind{Group: "extensions", Kind: "ReplicaSet"},
+		GroupVersionResource: schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "replicasets"},
+		Meta: metav1.APIResource{Namespaced: true},
 	}}
 
 	opts = append([]UpdateSettingsFunc{
@@ -289,7 +294,7 @@ func TestEnsureSyncedSingleNamespace(t *testing.T) {
 }
 
 func TestGetChildren(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	err := cluster.EnsureSynced()
 	require.NoError(t, err)
 
@@ -298,7 +303,7 @@ func TestGetChildren(t *testing.T) {
 		Ref: corev1.ObjectReference{
 			Kind: "Pod",
 			Namespace: "default",
-			Name: "helm-guestbook-pod",
+			Name: "helm-guestbook-pod-1",
 			APIVersion: "v1",
 			UID: "1",
 		},
@@ -332,7 +337,7 @@ func TestGetChildren(t *testing.T) {
 }
 
 func TestGetManagedLiveObjs(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	cluster.Invalidate(SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
 		return nil, true
 	}))
@@ -358,7 +363,7 @@ metadata:
 }
 
 func TestGetManagedLiveObjsNamespacedModeClusterLevelResource(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	cluster.Invalidate(SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
 		return nil, true
 	}))
@@ -383,7 +388,7 @@ metadata:
 }
 
 func TestGetManagedLiveObjsNamespacedModeClusterLevelResource_ClusterResourceEnabled(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	cluster.Invalidate(SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
 		return nil, true
 	}))
@@ -424,7 +429,7 @@ metadata:
 }
 
 func TestGetManagedLiveObjsAllNamespaces(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	cluster.Invalidate(SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
 		return nil, true
 	}))
@@ -452,7 +457,7 @@ metadata:
 }
 
 func TestGetManagedLiveObjsValidNamespace(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	cluster.Invalidate(SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
 		return nil, true
 	}))
@@ -480,7 +485,7 @@ metadata:
 }
 
 func TestGetManagedLiveObjsInvalidNamespace(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	cluster.Invalidate(SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) {
 		return nil, true
 	}))
@@ -587,26 +592,26 @@ metadata:
 }
 
 func TestChildDeletedEvent(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	err := cluster.EnsureSynced()
 	require.NoError(t, err)
 
-	cluster.processEvent(watch.Deleted, mustToUnstructured(testPod()))
+	cluster.processEvent(watch.Deleted, mustToUnstructured(testPod1()))
 
 	rsChildren := getChildren(cluster, mustToUnstructured(testRS()))
 	assert.Equal(t, []*Resource{}, rsChildren)
 }
 
 func TestProcessNewChildEvent(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testRS(), testDeploy())
 	err := cluster.EnsureSynced()
 	require.NoError(t, err)
 	newPod := strToUnstructured(`
   apiVersion: v1
   kind: Pod
   metadata:
-    uid: "4"
-    name: helm-guestbook-pod2
+    uid: "5"
+    name: helm-guestbook-pod-1-new
     namespace: default
     ownerReferences:
     - apiVersion: apps/v1
@@ -625,7 +630,7 @@ func TestProcessNewChildEvent(t *testing.T) {
 		Ref: corev1.ObjectReference{
 			Kind: "Pod",
 			Namespace: "default",
-			Name: "helm-guestbook-pod",
+			Name: "helm-guestbook-pod-1",
 			APIVersion: "v1",
 			UID: "1",
 		},
@@ -643,9 +648,9 @@ func TestProcessNewChildEvent(t *testing.T) {
 		Ref: corev1.ObjectReference{
 			Kind: "Pod",
 			Namespace: "default",
-			Name: "helm-guestbook-pod2",
+			Name: "helm-guestbook-pod-1-new",
 			APIVersion: "v1",
-			UID: "4",
+			UID: "5",
 		},
 		OwnerRefs: []metav1.OwnerReference{{
 			APIVersion: "apps/v1",
@@ -658,10 +663,10 @@ func TestProcessNewChildEvent(t *testing.T) {
 }
 
 func TestWatchCacheUpdated(t *testing.T) {
-	removed := testPod()
+	removed := testPod1()
 	removed.SetName(removed.GetName() + "-removed-pod")
 
-	updated := testPod()
+	updated := testPod1()
 	updated.SetName(updated.GetName() + "-updated-pod")
 	updated.SetResourceVersion("updated-pod-version")
 
@@ -670,10 +675,10 @@ func TestWatchCacheUpdated(t *testing.T) {
 	require.NoError(t, err)
 
-	added := testPod()
+	added := testPod1()
 	added.SetName(added.GetName() + "-new-pod")
 
-	podGroupKind := testPod().GroupVersionKind().GroupKind()
+	podGroupKind := testPod1().GroupVersionKind().GroupKind()
 
 	cluster.lock.Lock()
 	defer cluster.lock.Unlock()
@@ -684,13 +689,13 @@ func TestWatchCacheUpdated(t *testing.T) {
 }
 
 func TestNamespaceModeReplace(t *testing.T) {
-	ns1Pod := testPod()
+	ns1Pod := testPod1()
 	ns1Pod.SetNamespace("ns1")
 	ns1Pod.SetName("pod1")
 
-	ns2Pod := testPod()
+	ns2Pod := testPod1()
 	ns2Pod.SetNamespace("ns2")
 
-	podGroupKind := testPod().GroupVersionKind().GroupKind()
+	podGroupKind := testPod1().GroupVersionKind().GroupKind()
 
 	cluster := newCluster(t, ns1Pod, ns2Pod)
 	err := cluster.EnsureSynced()
@@ -805,14 +810,14 @@ func getResourceKey(t *testing.T, obj runtime.Object) kube.ResourceKey {
 	return kube.NewResourceKey(gvk.Group, gvk.Kind, m.GetNamespace(), m.GetName())
 }
 
-func testPod() *corev1.Pod {
+func testPod1() *corev1.Pod {
 	return &corev1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "v1",
 			Kind: "Pod",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "helm-guestbook-pod",
+			Name: "helm-guestbook-pod-1",
 			Namespace: "default",
 			UID: "1",
 			ResourceVersion: "123",
@@ -829,6 +834,30 @@ func testPod() *corev1.Pod {
 	}
 }
 
+// testPod2 is similar to testPod1, but its owner reference lacks a UID.
+func testPod2() *corev1.Pod {
+	return &corev1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind: "Pod",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "helm-guestbook-pod-2",
+			Namespace: "default",
+			UID: "4",
+			ResourceVersion: "123",
+			CreationTimestamp: metav1.NewTime(testCreationTime),
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion: "apps/v1",
+					Kind: "ReplicaSet",
+					Name: "helm-guestbook-rs",
+				},
+			},
+		},
+	}
+}
+
 func testCRD() *apiextensions.CustomResourceDefinition {
 	return &apiextensions.CustomResourceDefinition{
 		TypeMeta: metav1.TypeMeta{
@@ -958,7 +987,7 @@ func testDeploy() *appsv1.Deployment {
 }
 
 func TestIterateHierachy(t *testing.T) {
-	cluster := newCluster(t, testPod(), testRS(), testDeploy())
+	cluster := newCluster(t, testPod1(), testPod2(), testRS(), testExtensionsRS(), testDeploy())
 	err := cluster.EnsureSynced()
 	require.NoError(t, err)
 
@@ -971,7 +1000,8 @@ func TestIterateHierachy(t *testing.T) {
 		assert.ElementsMatch(t,
 			[]kube.ResourceKey{
-				kube.GetResourceKey(mustToUnstructured(testPod())),
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
 				kube.GetResourceKey(mustToUnstructured(testRS())),
 				kube.GetResourceKey(mustToUnstructured(testDeploy()))},
 			keys)
 	})
@@ -1016,10 +1046,135 @@ func TestIterateHierachy(t *testing.T) {
 			[]kube.ResourceKey{
 				kube.GetResourceKey(mustToUnstructured(testDeploy())),
 				kube.GetResourceKey(mustToUnstructured(testRS())),
-				kube.GetResourceKey(mustToUnstructured(testPod())),
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
 			},
 			keys)
 	})
+
+	// After the owner reference UID is backfilled for pod2, pod2 should appear in the results here as well.
+	t.Run("IterateStartFromExtensionsRS", func(t *testing.T) {
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchy(kube.GetResourceKey(mustToUnstructured(testExtensionsRS())), func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return true
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
+				kube.GetResourceKey(mustToUnstructured(testExtensionsRS()))},
+			keys)
+	})
+}
+
+func TestIterateHierachyV2(t *testing.T) {
+	cluster := newCluster(t, testPod1(), testPod2(), testRS(), testExtensionsRS(), testDeploy())
+	err := cluster.EnsureSynced()
+	require.NoError(t, err)
+
+	t.Run("IterateAll", func(t *testing.T) {
+		startKeys := []kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testDeploy()))}
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchyV2(startKeys, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return true
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
+				kube.GetResourceKey(mustToUnstructured(testRS())),
+				kube.GetResourceKey(mustToUnstructured(testDeploy()))},
+			keys)
+	})
+
+	t.Run("ExitAtRoot", func(t *testing.T) {
+		startKeys := []kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testDeploy()))}
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchyV2(startKeys, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return false
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testDeploy()))},
+			keys)
+	})
+
+	t.Run("ExitAtSecondLevelChild", func(t *testing.T) {
+		startKeys := []kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testDeploy()))}
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchyV2(startKeys, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return child.ResourceKey().Kind != kube.ReplicaSetKind
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testDeploy())),
+				kube.GetResourceKey(mustToUnstructured(testRS())),
+			},
+			keys)
+	})
+
+	t.Run("ExitAtThirdLevelChild", func(t *testing.T) {
+		startKeys := []kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testDeploy()))}
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchyV2(startKeys, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return child.ResourceKey().Kind != kube.PodKind
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testDeploy())),
+				kube.GetResourceKey(mustToUnstructured(testRS())),
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
+			},
+			keys)
+	})
+
+	t.Run("IterateAllStartFromMultiple", func(t *testing.T) {
+		startKeys := []kube.ResourceKey{
+			kube.GetResourceKey(mustToUnstructured(testRS())),
+			kube.GetResourceKey(mustToUnstructured(testDeploy())),
+		}
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchyV2(startKeys, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return true
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
+				kube.GetResourceKey(mustToUnstructured(testRS())),
+				kube.GetResourceKey(mustToUnstructured(testDeploy()))},
+			keys)
+	})
+
+	// After the owner reference UID is backfilled for pod2, pod2 should appear in the results here as well.
+	t.Run("IterateStartFromExtensionsRS", func(t *testing.T) {
+		startKeys := []kube.ResourceKey{kube.GetResourceKey(mustToUnstructured(testExtensionsRS()))}
+		keys := []kube.ResourceKey{}
+		cluster.IterateHierarchyV2(startKeys, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			keys = append(keys, child.ResourceKey())
+			return true
+		})
+
+		assert.ElementsMatch(t,
+			[]kube.ResourceKey{
+				kube.GetResourceKey(mustToUnstructured(testPod1())),
+				kube.GetResourceKey(mustToUnstructured(testPod2())),
+				kube.GetResourceKey(mustToUnstructured(testExtensionsRS()))},
+			keys)
+	})
 }
 
 // Test_watchEvents_Deadlock validates that starting watches will not create a deadlock
@@ -1031,7 +1186,7 @@ func Test_watchEvents_Deadlock(t *testing.T) {
 	deadlock := sync.RWMutex{}
 	hasDeadlock := false
 
-	res1 := testPod()
+	res1 := testPod1()
 	res2 := testRS()
 
 	cluster := newClusterWithOptions(t, []UpdateSettingsFunc{
@@ -1096,3 +1251,79 @@ func Test_watchEvents_Deadlock(t *testing.T) {
 		}
 	}
 }
+
+func buildTestResourceMap() map[kube.ResourceKey]*Resource {
+	ns := make(map[kube.ResourceKey]*Resource)
+	for i := 0; i < 100000; i++ {
+		name := fmt.Sprintf("test-%d", i)
+		ownerName := fmt.Sprintf("test-%d", i/10)
+		uid := uuid.New().String()
+		key := kube.ResourceKey{
+			Namespace: "default",
+			Name: name,
+			Kind: "Pod",
+		}
+		resourceYaml := fmt.Sprintf(`
+apiVersion: v1
+kind: Pod
+metadata:
+  namespace: default
+  name: %s
+  uid: %s`, name, uid)
+		if i/10 != 0 {
+			owner := ns[kube.ResourceKey{
+				Namespace: "default",
+				Name: ownerName,
+				Kind: "Pod",
+			}]
+			ownerUid := owner.Ref.UID
+			resourceYaml += fmt.Sprintf(`
+  ownerReferences:
+  - apiVersion: v1
+    kind: Pod
+    name: %s
+    uid: %s`, ownerName, ownerUid)
+		}
+		ns[key] = cacheTest.newResource(strToUnstructured(resourceYaml))
+	}
+	return ns
+}
+
+func BenchmarkBuildGraph(b *testing.B) {
+	testResources := buildTestResourceMap()
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		buildGraph(testResources)
+	}
+}
+
+func BenchmarkIterateHierarchyV2(b *testing.B) {
+	cluster := newCluster(b)
+	testResources := buildTestResourceMap()
+	for _, resource := range testResources {
+		cluster.setNode(resource)
+	}
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		cluster.IterateHierarchyV2([]kube.ResourceKey{
+			{Namespace: "default", Name: "test-1", Kind: "Pod"},
+		}, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+			return true
+		})
+	}
+}
+
+//func BenchmarkIterateHierarchy(b *testing.B) {
+//	cluster := newCluster(b)
+//	for _, resource := range testResources {
+//		cluster.setNode(resource)
+//	}
+//	b.ResetTimer()
+//	for n := 0; n < b.N; n++ {
+//		cluster.IterateHierarchy(kube.ResourceKey{
+//			Namespace: "default", Name: "test-1", Kind: "Pod",
+//		}, func(child *Resource, _ map[kube.ResourceKey]*Resource) bool {
+//			return true
+//		})
+//	}
+//}
diff --git a/pkg/cache/mocks/ClusterCache.go b/pkg/cache/mocks/ClusterCache.go
index 7a1be7324..c9fbc8f9c 100644
--- a/pkg/cache/mocks/ClusterCache.go
+++ b/pkg/cache/mocks/ClusterCache.go
@@ -1,4 +1,4 @@
-// Code generated by mockery v2.40.2. DO NOT EDIT.
+// Code generated by mockery v2.43.2. DO NOT EDIT.
 
 package mocks
 
@@ -237,6 +237,11 @@ func (_m *ClusterCache) IterateHierarchy(key kube.ResourceKey, action func(*cach
 	_m.Called(key, action)
 }
 
+// IterateHierarchyV2 provides a mock function with given fields: keys, action
+func (_m *ClusterCache) IterateHierarchyV2(keys []kube.ResourceKey, action func(*cache.Resource, map[kube.ResourceKey]*cache.Resource) bool) {
+	_m.Called(keys, action)
+}
+
 // OnEvent provides a mock function with given fields: handler
 func (_m *ClusterCache) OnEvent(handler cache.OnEventHandler) cache.Unsubscribe {
 	ret := _m.Called(handler)
diff --git a/pkg/cache/resource.go b/pkg/cache/resource.go
index 4097f4dca..eae3d4e6e 100644
--- a/pkg/cache/resource.go
+++ b/pkg/cache/resource.go
@@ -2,6 +2,7 @@ package cache
 
 import (
 	"fmt"
+	"k8s.io/apimachinery/pkg/types"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -99,3 +100,33 @@ func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents ma
 		}
 	}
 }
+
+// iterateChildrenV2 is a depth-first traversal of the graph of resources starting from the current resource.
+func (r *Resource) iterateChildrenV2(graph map[kube.ResourceKey]map[types.UID]*Resource, ns map[kube.ResourceKey]*Resource, visited map[kube.ResourceKey]int, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource) bool) {
+	key := r.ResourceKey()
+	if visited[key] == 2 {
+		return
+	}
+	// this indicates that we've started processing this node's children
+	visited[key] = 1
+	defer func() {
+		// this indicates that we've finished processing this node's children
+		visited[key] = 2
+	}()
+	children, ok := graph[key]
+	if !ok || children == nil {
+		return
+	}
+	for _, c := range children {
+		childKey := c.ResourceKey()
+		child := ns[childKey]
+		if visited[childKey] == 1 {
+			// Since we encountered a node that we're currently processing, we know we have a circular dependency.
+			_ = action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns)
+		} else if visited[childKey] == 0 {
+			if action(nil, child, ns) {
+				child.iterateChildrenV2(graph, ns, visited, action)
+			}
+		}
+	}
+}
diff --git a/pkg/cache/resource_test.go b/pkg/cache/resource_test.go
index 45e597341..a3b06a6cc 100644
--- a/pkg/cache/resource_test.go
+++ b/pkg/cache/resource_test.go
@@ -10,7 +10,7 @@ import (
 var cacheTest = NewClusterCache(&rest.Config{})
 
 func TestIsParentOf(t *testing.T) {
-	child := cacheTest.newResource(mustToUnstructured(testPod()))
+	child := cacheTest.newResource(mustToUnstructured(testPod1()))
 	parent := cacheTest.newResource(mustToUnstructured(testRS()))
 	grandParent := cacheTest.newResource(mustToUnstructured(testDeploy()))
 
@@ -22,7 +22,7 @@ func TestIsParentOfSameKindDifferentGroupAndUID(t *testing.T) {
 	rs := testRS()
 	rs.APIVersion = "somecrd.io/v1"
 	rs.SetUID("123")
-	child := cacheTest.newResource(mustToUnstructured(testPod()))
+	child := cacheTest.newResource(mustToUnstructured(testPod1()))
 	invalidParent := cacheTest.newResource(mustToUnstructured(rs))
 
 	assert.False(t, invalidParent.isParentOf(child))
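
A minimal sketch of how a consumer is expected to call the new IterateHierarchyV2 method, assuming the standard gitops-engine module paths; the rest.Config wiring, the root key, and the error handling are illustrative only and not part of this patch:

package main

import (
	"fmt"

	"github.com/argoproj/gitops-engine/pkg/cache"
	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	"k8s.io/client-go/rest"
)

func main() {
	// Wiring is elided: a real caller passes a usable rest.Config and cache options.
	clusterCache := cache.NewClusterCache(&rest.Config{})
	if err := clusterCache.EnsureSynced(); err != nil {
		panic(err)
	}

	// Root keys are illustrative; a caller would supply its own top level resources here.
	roots := []kube.ResourceKey{
		{Group: "apps", Kind: "Deployment", Namespace: "default", Name: "helm-guestbook"},
	}

	// One call walks every root. The existing IterateHierarchy accepts a single key,
	// so callers with multiple roots invoke it once per key.
	tree := map[kube.ResourceKey]bool{}
	clusterCache.IterateHierarchyV2(roots, func(res *cache.Resource, _ map[kube.ResourceKey]*cache.Resource) bool {
		tree[res.ResourceKey()] = true
		return true // returning false stops descending below this resource
	})
	fmt.Println(len(tree))
}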
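
The owner-UID backfill inside buildGraph is what lets testPod2, whose ownerReference carries no uid, end up attached to its ReplicaSet parent. The sketch below isolates that lookup with simplified stand-in types rather than metav1.OwnerReference and the cache's Resource:

package main

import "fmt"

// ownerRef and node are simplified stand-ins for metav1.OwnerReference and the cache's Resource.
type ownerRef struct {
	Group, Kind, Name, UID string
}

type node struct {
	Group, Kind, Namespace, Name, UID string
	Owners                            []ownerRef
}

type graphKey struct {
	Group, Kind, Namespace, Name string
}

// backfillOwnerUIDs fills in missing owner UIDs by looking the owner up by
// group/kind/namespace/name among the namespace's nodes, mirroring the lookup
// buildGraph performs against nsNodes before attaching children to parents.
func backfillOwnerUIDs(nodes []*node) {
	byKey := map[graphKey]*node{}
	for _, n := range nodes {
		byKey[graphKey{n.Group, n.Kind, n.Namespace, n.Name}] = n
	}
	for _, n := range nodes {
		for i, ref := range n.Owners {
			if ref.UID != "" {
				continue
			}
			if parent, ok := byKey[graphKey{ref.Group, ref.Kind, n.Namespace, ref.Name}]; ok {
				n.Owners[i].UID = parent.UID
			}
		}
	}
}

func main() {
	rs := &node{Group: "apps", Kind: "ReplicaSet", Namespace: "default", Name: "helm-guestbook-rs", UID: "2"}
	pod := &node{Kind: "Pod", Namespace: "default", Name: "helm-guestbook-pod-2",
		Owners: []ownerRef{{Group: "apps", Kind: "ReplicaSet", Name: "helm-guestbook-rs"}}}
	backfillOwnerUIDs([]*node{rs, pod})
	fmt.Println(pod.Owners[0].UID) // prints "2", so the pod can now be grouped under its parent's UID
}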
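
The iterateChildrenV2 traversal added to resource.go is easier to follow with its three-state visited map shown on a toy graph: 0 means unvisited, 1 means the node's children are being processed, 2 means done, and a back edge to an in-progress node is reported as a circular dependency instead of recursing forever. The string-keyed graph and callback below are simplified stand-ins for the cache's kube.ResourceKey and *Resource types:

package main

import "fmt"

// walk mirrors the shape of iterateChildrenV2: visited[node] is 0 (unvisited),
// 1 (children being processed) or 2 (done). Reaching a node in state 1 means a
// cycle, which is reported to the callback instead of being traversed.
func walk(graph map[string][]string, node string, visited map[string]int, action func(child string, err error) bool) {
	if visited[node] == 2 {
		return
	}
	visited[node] = 1
	defer func() { visited[node] = 2 }()
	for _, child := range graph[node] {
		switch visited[child] {
		case 1:
			_ = action(child, fmt.Errorf("circular dependency detected: %s is child and parent of %s", child, node))
		case 0:
			if action(child, nil) {
				walk(graph, child, visited, action)
			}
		}
	}
}

func main() {
	graph := map[string][]string{
		"deploy": {"rs"},
		"rs":     {"pod-1", "pod-2"},
		"pod-1":  {"deploy"}, // artificial cycle to show the error path
	}
	visited := map[string]int{}
	walk(graph, "deploy", visited, func(child string, err error) bool {
		if err != nil {
			fmt.Println(err)
			return false
		}
		fmt.Println("visit", child)
		return true
	})
}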