From 3232abcf390f5364d4ebfc4ce7c2b5fac0281686 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20B=C3=BCringer?= <4662360+sbueringer@users.noreply.github.com> Date: Mon, 2 Sep 2024 11:04:57 +0200 Subject: [PATCH] Refactor node drain (#11074) --- controllers/alias.go | 13 +- go.mod | 2 +- internal/controllers/machine/drain/cache.go | 97 ++ internal/controllers/machine/drain/drain.go | 380 ++++++ .../controllers/machine/drain/drain_test.go | 1058 +++++++++++++++++ internal/controllers/machine/drain/filters.go | 232 ++++ .../controllers/machine/drain/filters_test.go | 71 ++ .../controllers/machine/machine_controller.go | 165 +-- .../machine/machine_controller_test.go | 399 +++++++ main.go | 34 +- test/go.mod | 19 - test/go.sum | 97 -- 12 files changed, 2350 insertions(+), 217 deletions(-) create mode 100644 internal/controllers/machine/drain/cache.go create mode 100644 internal/controllers/machine/drain/drain.go create mode 100644 internal/controllers/machine/drain/drain_test.go create mode 100644 internal/controllers/machine/drain/filters.go create mode 100644 internal/controllers/machine/drain/filters_test.go diff --git a/controllers/alias.go b/controllers/alias.go index 322d9b22a56e..76e300827c3c 100644 --- a/controllers/alias.go +++ b/controllers/alias.go @@ -18,7 +18,6 @@ package controllers import ( "context" - "time" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -65,18 +64,14 @@ type MachineReconciler struct { // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string - - // NodeDrainClientTimeout timeout of the client used for draining nodes. - NodeDrainClientTimeout time.Duration } func (r *MachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&machinecontroller.Reconciler{ - Client: r.Client, - APIReader: r.APIReader, - Tracker: r.Tracker, - WatchFilterValue: r.WatchFilterValue, - NodeDrainClientTimeout: r.NodeDrainClientTimeout, + Client: r.Client, + APIReader: r.APIReader, + Tracker: r.Tracker, + WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) } diff --git a/go.mod b/go.mod index 8dc5f43a40d2..17b3ad1352db 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/valyala/fastjson v1.6.4 go.etcd.io/etcd/api/v3 v3.5.15 go.etcd.io/etcd/client/v3 v3.5.15 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/oauth2 v0.22.0 golang.org/x/text v0.17.0 gomodules.xyz/jsonpatch/v2 v2.4.0 @@ -156,7 +157,6 @@ require ( go.uber.org/zap v1.27.0 // indirect go4.org v0.0.0-20201209231011-d4a079459e60 // indirect golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.23.0 // indirect diff --git a/internal/controllers/machine/drain/cache.go b/internal/controllers/machine/drain/cache.go new file mode 100644 index 000000000000..6bb06ec5c651 --- /dev/null +++ b/internal/controllers/machine/drain/cache.go @@ -0,0 +1,97 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package drain + +import ( + "time" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" +) + +const ( + // ttl is the duration for which we keep entries in the cache. + ttl = 10 * time.Minute + + // expirationInterval is the interval in which we will remove expired entries + // from the cache. + expirationInterval = 10 * time.Hour +) + +// CacheEntry is an entry of the drain cache. It stores at which time a Machine was drained the last time. +type CacheEntry struct { + Machine types.NamespacedName + LastDrain time.Time +} + +// Cache caches the time when the last drain was done for a Machine. +// Specifically we only use it to ensure we only retry drains +// at a specific interval and not more often. +type Cache interface { + // Add adds the given entry to the Cache. + // Note: entries expire after the ttl. + Add(entry CacheEntry) + + // Has checks if the given key (still) exists in the Cache. + // Note: entries expire after the ttl. + Has(machineName types.NamespacedName) (CacheEntry, bool) +} + +// NewCache creates a new cache. +func NewCache() Cache { + r := &drainCache{ + Store: cache.NewTTLStore(func(obj interface{}) (string, error) { + // We only add CacheEntries to the cache, so it's safe to cast to CacheEntry. + return obj.(CacheEntry).Machine.String(), nil + }, ttl), + } + go func() { + for { + // Call list to clear the cache of expired items. + // We have to do this periodically as the cache itself only expires + // items lazily. If we don't do this the cache grows indefinitely. + r.List() + + time.Sleep(expirationInterval) + } + }() + return r +} + +type drainCache struct { + cache.Store +} + +// Add adds the given entry to the Cache. +// Note: entries expire after the ttl. +func (r *drainCache) Add(entry CacheEntry) { + // Note: We can ignore the error here because by only allowing CacheEntries + // and providing the corresponding keyFunc ourselves we can guarantee that + // the error never occurs. + _ = r.Store.Add(entry) +} + +// Has checks if the given key (still) exists in the Cache. +// Note: entries expire after the ttl. +func (r *drainCache) Has(machineName types.NamespacedName) (CacheEntry, bool) { + // Note: We can ignore the error here because GetByKey never returns an error. + item, exists, _ := r.Store.GetByKey(machineName.String()) + if exists { + return item.(CacheEntry), true + } + return CacheEntry{}, false +} diff --git a/internal/controllers/machine/drain/drain.go b/internal/controllers/machine/drain/drain.go new file mode 100644 index 000000000000..c82714e1e1d4 --- /dev/null +++ b/internal/controllers/machine/drain/drain.go @@ -0,0 +1,380 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package drain provides a helper to cordon and drain Nodes. +package drain + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + "golang.org/x/exp/maps" + corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Helper contains the parameters to control the behaviour of the drain helper. +type Helper struct { + Client client.Client + + // GracePeriodSeconds is how long to wait for a Pod to terminate. + // IMPORTANT: 0 means "delete immediately"; set to a negative value + // to use the pod's terminationGracePeriodSeconds. + // GracePeriodSeconds is used when executing Eviction calls. + GracePeriodSeconds int + + // SkipWaitForDeleteTimeoutSeconds ignores Pods that have a + // DeletionTimeStamp > N seconds. This can be used e.g. when a Node is unreachable + // and the Pods won't drain because of that. + SkipWaitForDeleteTimeoutSeconds int +} + +// CordonNode cordons a Node. +func (d *Helper) CordonNode(ctx context.Context, node *corev1.Node) error { + if node.Spec.Unschedulable { + // Node is already cordoned, nothing to do. + return nil + } + + patch := client.MergeFrom(node.DeepCopy()) + node.Spec.Unschedulable = true + if err := d.Client.Patch(ctx, node, patch); err != nil { + return errors.Wrapf(err, "failed to cordon Node") + } + + return nil +} + +// GetPodsForEviction gets Pods running on a Node and then filters and returns them as PodDeleteList, +// or error if it cannot list Pods or get DaemonSets. All Pods that have to go away can be obtained with .Pods(). +func (d *Helper) GetPodsForEviction(ctx context.Context, nodeName string) (*PodDeleteList, error) { + allPods := []*corev1.Pod{} + podList := &corev1.PodList{} + for { + listOpts := []client.ListOption{ + client.InNamespace(metav1.NamespaceAll), + client.MatchingFields{"spec.nodeName": nodeName}, + client.Continue(podList.Continue), + client.Limit(100), + } + if err := d.Client.List(ctx, podList, listOpts...); err != nil { + return nil, errors.Wrapf(err, "failed to get Pods for eviction") + } + + for _, pod := range podList.Items { + allPods = append(allPods, &pod) + } + + if podList.Continue == "" { + break + } + } + + list := filterPods(ctx, allPods, d.makeFilters()) + if errs := list.errors(); len(errs) > 0 { + return nil, errors.Wrapf(kerrors.NewAggregate(errs), "failed to get Pods for eviction") + } + + return list, nil +} + +func filterPods(ctx context.Context, allPods []*corev1.Pod, filters []PodFilter) *PodDeleteList { + pods := []PodDelete{} + for _, pod := range allPods { + var status PodDeleteStatus + // Collect warnings for the case where we are going to delete the Pod. + var deleteWarnings []string + for _, filter := range filters { + status = filter(ctx, pod) + if !status.Delete { + // short-circuit as soon as pod is filtered out + // at that point, there is no reason to run pod + // through any additional filters + break + } + if status.Reason == PodDeleteStatusTypeWarning { + deleteWarnings = append(deleteWarnings, status.Message) + } + } + + // Note: It only makes sense to aggregate warnings if we are going ahead with the deletion. 
+ // If we don't, it's absolutely fine to just use the status from the filter that decided that + // we are not going to delete the Pod. + if status.Delete && + (status.Reason == PodDeleteStatusTypeOkay || status.Reason == PodDeleteStatusTypeWarning) && + len(deleteWarnings) > 0 { + status.Reason = PodDeleteStatusTypeWarning + status.Message = strings.Join(deleteWarnings, ", ") + } + + // Add the pod to PodDeleteList no matter what PodDeleteStatus is, + // those pods whose PodDeleteStatus is false like DaemonSet will + // be caught by list.errors() + pod.Kind = "Pod" + pod.APIVersion = "v1" + pods = append(pods, PodDelete{ + Pod: pod, + Status: status, + }) + } + list := &PodDeleteList{items: pods} + return list +} + +// EvictPods evicts the pods. +func (d *Helper) EvictPods(ctx context.Context, podDeleteList *PodDeleteList) EvictionResult { + log := ctrl.LoggerFrom(ctx) + + // Sort podDeleteList, this is important so we always deterministically evict Pods and build the EvictionResult. + // Otherwise the condition could change with every single reconcile even if nothing changes on the Node. + sort.Slice(podDeleteList.items, func(i, j int) bool { + return fmt.Sprintf("%s/%s", podDeleteList.items[i].Pod.GetNamespace(), podDeleteList.items[i].Pod.GetName()) < + fmt.Sprintf("%s/%s", podDeleteList.items[j].Pod.GetNamespace(), podDeleteList.items[j].Pod.GetName()) + }) + + var podsToTriggerEviction []PodDelete + var podsWithDeletionTimestamp []PodDelete + var podsToBeIgnored []PodDelete + for _, pod := range podDeleteList.items { + switch { + case pod.Status.Delete && pod.Pod.DeletionTimestamp.IsZero(): + podsToTriggerEviction = append(podsToTriggerEviction, pod) + case pod.Status.Delete: + podsWithDeletionTimestamp = append(podsWithDeletionTimestamp, pod) + default: + podsToBeIgnored = append(podsToBeIgnored, pod) + } + } + + log.Info("Drain not completed yet, there are still Pods on the Node that have to be drained", + "podsToTriggerEviction", podDeleteListToString(podsToTriggerEviction, 5), + "podsWithDeletionTimestamp", podDeleteListToString(podsWithDeletionTimestamp, 5), + ) + + // Trigger evictions for at most 10s. We'll continue on the next reconcile if we hit the timeout. + evictionTimeout := 10 * time.Second + ctx, cancel := context.WithTimeout(ctx, evictionTimeout) + defer cancel() + + res := EvictionResult{ + PodsFailedEviction: map[string][]*corev1.Pod{}, + } + + for _, pd := range podsToBeIgnored { + log := ctrl.LoggerFrom(ctx, "Pod", klog.KObj(pd.Pod)) + if pd.Status.Reason == PodDeleteStatusTypeWarning && pd.Status.Message != "" { + log = log.WithValues("reason", pd.Status.Message) + } + + log.V(4).Info("Skip evicting Pod because it should be ignored") + res.PodsIgnored = append(res.PodsIgnored, pd.Pod) + } + + for _, pd := range podsWithDeletionTimestamp { + log := ctrl.LoggerFrom(ctx, "Pod", klog.KObj(pd.Pod)) + + log.V(4).Info("Skip triggering Pod eviction because it already has a deletionTimestamp") + res.PodsDeletionTimestampSet = append(res.PodsDeletionTimestampSet, pd.Pod) + } + +evictionLoop: + for _, pd := range podsToTriggerEviction { + log := ctrl.LoggerFrom(ctx, "Pod", klog.KObj(pd.Pod)) + if pd.Status.Reason == PodDeleteStatusTypeWarning && pd.Status.Message != "" { + log = log.WithValues("warning", pd.Status.Message) + } + ctx := ctrl.LoggerInto(ctx, log) + + select { + case <-ctx.Done(): + // Skip eviction if the eviction timeout is reached. 
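+ // Note: The Pod is recorded under PodsFailedEviction (instead of returning an error) so the
+ // overall eviction result still reflects it and the eviction is retried on a later reconcile.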
+ err := fmt.Errorf("eviction timeout of %s reached, eviction will be retried", evictionTimeout) + log.V(4).Error(err, "Error when evicting Pod") + res.PodsFailedEviction[err.Error()] = append(res.PodsFailedEviction[err.Error()], pd.Pod) + continue evictionLoop + default: + } + + log.V(4).Info("Evicting Pod") + + err := d.evictPod(ctx, pd.Pod) + switch { + case err == nil: + log.V(4).Info("Pod eviction successfully triggered") + res.PodsDeletionTimestampSet = append(res.PodsDeletionTimestampSet, pd.Pod) + case apierrors.IsNotFound(err): + // Pod doesn't exist anymore as it has been deleted in the meantime. + log.V(4).Info("Eviction not needed, Pod doesn't exist anymore") + res.PodsNotFound = append(res.PodsNotFound, pd.Pod) + case apierrors.IsTooManyRequests(err): + var statusError *apierrors.StatusError + + // Ensure the causes are also included in the error message. + // Before: "Cannot evict pod as it would violate the pod's disruption budget." + // After: "Cannot evict pod as it would violate the pod's disruption budget. The disruption budget nginx needs 20 healthy pods and has 20 currently" + if ok := errors.As(err, &statusError); ok { + errorMessage := statusError.Status().Message + if statusError.Status().Details != nil { + var causes []string + for _, cause := range statusError.Status().Details.Causes { + causes = append(causes, cause.Message) + } + errorMessage = fmt.Sprintf("%s %v", errorMessage, strings.Join(causes, ",")) + } + err = errors.New(errorMessage) + } + + log.V(4).Error(err, "Error when evicting Pod") + res.PodsFailedEviction[err.Error()] = append(res.PodsFailedEviction[err.Error()], pd.Pod) + case apierrors.IsForbidden(err) && apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause): + // Creating an eviction resource in a terminating namespace will throw a forbidden error, e.g.: + // "pods "pod-6-to-trigger-eviction-namespace-terminating" is forbidden: unable to create new content in namespace test-namespace because it is being terminated" + // The kube-controller-manager is supposed to set the deletionTimestamp on the Pod and then this error will go away. + msg := "Cannot evict pod from terminating namespace: unable to create eviction (kube-controller-manager should set deletionTimestamp)" + log.V(4).Error(err, msg) + res.PodsFailedEviction[msg] = append(res.PodsFailedEviction[msg], pd.Pod) + default: + log.V(4).Error(err, "Error when evicting Pod") + res.PodsFailedEviction[err.Error()] = append(res.PodsFailedEviction[err.Error()], pd.Pod) + } + } + + return res +} + +// evictPod evicts the given Pod, or return an error if it couldn't. +func (d *Helper) evictPod(ctx context.Context, pod *corev1.Pod) error { + delOpts := metav1.DeleteOptions{} + if d.GracePeriodSeconds >= 0 { + gracePeriodSeconds := int64(d.GracePeriodSeconds) + delOpts.GracePeriodSeconds = &gracePeriodSeconds + } + + eviction := &policyv1.Eviction{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + Namespace: pod.Namespace, + }, + DeleteOptions: &delOpts, + } + + return d.Client.SubResource("eviction").Create(ctx, pod, eviction) +} + +// EvictionResult contains the results of an eviction. +type EvictionResult struct { + PodsDeletionTimestampSet []*corev1.Pod + PodsFailedEviction map[string][]*corev1.Pod + PodsNotFound []*corev1.Pod + PodsIgnored []*corev1.Pod +} + +// DrainCompleted returns if a Node is entirely drained, i.e. if all relevant Pods have gone away. 
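+// Note: Pods that were ignored or no longer exist (PodsIgnored, PodsNotFound) do not block drain completion.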
+func (r EvictionResult) DrainCompleted() bool { + return len(r.PodsDeletionTimestampSet) == 0 && len(r.PodsFailedEviction) == 0 +} + +// ConditionMessage returns a condition message for the case where a drain is not completed. +func (r EvictionResult) ConditionMessage() string { + if r.DrainCompleted() { + return "" + } + + conditionMessage := "Drain not completed yet:" + if len(r.PodsDeletionTimestampSet) > 0 { + conditionMessage = fmt.Sprintf("%s\n* Pods with deletionTimestamp that still exist: %s", + conditionMessage, PodListToString(r.PodsDeletionTimestampSet, 5)) + } + if len(r.PodsFailedEviction) > 0 { + sortedFailureMessages := maps.Keys(r.PodsFailedEviction) + sort.Strings(sortedFailureMessages) + + conditionMessage = fmt.Sprintf("%s\n* Pods with eviction failed:", conditionMessage) + + skippedFailureMessages := []string{} + if len(sortedFailureMessages) > 5 { + skippedFailureMessages = sortedFailureMessages[5:] + sortedFailureMessages = sortedFailureMessages[:5] + } + for _, failureMessage := range sortedFailureMessages { + pods := r.PodsFailedEviction[failureMessage] + conditionMessage = fmt.Sprintf("%s\n * %s: %s", conditionMessage, failureMessage, PodListToString(pods, 3)) + } + if len(skippedFailureMessages) > 0 { + skippedFailureMessagesCount := len(skippedFailureMessages) + podCount := 0 + for _, failureMessage := range skippedFailureMessages { + podCount += len(r.PodsFailedEviction[failureMessage]) + } + + conditionMessage = fmt.Sprintf("%s\n * ... ", conditionMessage) + if skippedFailureMessagesCount == 1 { + conditionMessage += "(1 more error " + } else { + conditionMessage += fmt.Sprintf("(%d more errors ", skippedFailureMessagesCount) + } + if podCount == 1 { + conditionMessage += "applying to 1 Pod)" + } else { + conditionMessage += fmt.Sprintf("applying to %d Pods)", podCount) + } + } + } + return conditionMessage +} + +// podDeleteListToString returns a comma-separated list of the first n entries of the PodDelete list. +func podDeleteListToString(podList []PodDelete, n int) string { + return listToString(podList, func(pd PodDelete) string { + return klog.KObj(pd.Pod).String() + }, n) +} + +// PodListToString returns a comma-separated list of the first n entries of the Pod list. +func PodListToString(podList []*corev1.Pod, n int) string { + return listToString(podList, func(p *corev1.Pod) string { + return klog.KObj(p).String() + }, n) +} + +// listToString returns a comma-separated list of the first n entries of the list (strings are calculated via stringFunc). +func listToString[T any](list []T, stringFunc func(T) string, n int) string { + shortenedBy := 0 + if len(list) > n { + shortenedBy = len(list) - n + list = list[:n] + } + stringList := []string{} + for _, p := range list { + stringList = append(stringList, stringFunc(p)) + } + + if shortenedBy > 0 { + stringList = append(stringList, fmt.Sprintf("... (%d more)", shortenedBy)) + } + + return strings.Join(stringList, ", ") +} diff --git a/internal/controllers/machine/drain/drain_test.go b/internal/controllers/machine/drain/drain_test.go new file mode 100644 index 000000000000..1418e2c95a36 --- /dev/null +++ b/internal/controllers/machine/drain/drain_test.go @@ -0,0 +1,1058 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package drain + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/client/interceptor" +) + +func TestRunCordonOrUncordon(t *testing.T) { + tests := []struct { + name string + node *corev1.Node + }{ + { + name: "Uncordoned Node should be cordoned", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: false, + }, + }, + }, + { + name: "Cordoned Node should stay cordoned", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + fakeClient := fake.NewClientBuilder().WithObjects(tt.node).Build() + + drainer := &Helper{ + Client: fakeClient, + } + + g.Expect(drainer.CordonNode(context.Background(), tt.node)).To(Succeed()) + + gotNode := tt.node.DeepCopy() + g.Expect(fakeClient.Get(context.Background(), client.ObjectKeyFromObject(gotNode), gotNode)).To(Succeed()) + g.Expect(gotNode.Spec.Unschedulable).To(BeTrue()) + }) + } +} + +func TestGetPodsForEviction(t *testing.T) { + tests := []struct { + name string + pods []*corev1.Pod + wantPodDeleteList PodDeleteList + }{ + { + name: "skipDeletedFilter", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-pod-old-deletionTimestamp", + DeletionTimestamp: &metav1.Time{Time: time.Now().Add(time.Duration(1) * time.Minute * -1)}, + Finalizers: []string{"block-deletion"}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-pod-new-deletionTimestamp", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"block-deletion"}, + }, + }, + }, + wantPodDeleteList: PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-pod-old-deletionTimestamp", + }, + }, + // Skip this Pod because deletionTimestamp is > SkipWaitForDeleteTimeoutSeconds (=10s) ago. + Status: PodDeleteStatus{ + Delete: false, + Reason: PodDeleteStatusTypeSkip, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-pod-new-deletionTimestamp", + }, + }, + // Delete this Pod because deletionTimestamp is < SkipWaitForDeleteTimeoutSeconds (=10s) ago. 
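+ // The unmanagedWarning comes from the unreplicatedFilter, because this Pod has no controller reference.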
+ Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeWarning, + Message: unmanagedWarning, + }, + }, + }}, + }, + { + name: "daemonSetFilter", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-pod-with-different-controller", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-succeeded-daemonset-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + Controller: ptr.To(true), + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-delete-orphaned-daemonset-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + Name: "daemonset-does-not-exist", + Controller: ptr.To(true), + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-skip-daemonset-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + Name: "daemonset-does-exist", + Controller: ptr.To(true), + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + wantPodDeleteList: PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-pod-with-different-controller", + }, + }, + // Delete this Pod because the controller is not a DaemonSet + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-succeeded-daemonset-pod", + }, + }, + // Delete this DaemonSet Pod because it is succeeded. + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-delete-orphaned-daemonset-pod", + }, + }, + // Delete this DaemonSet Pod because it is orphaned. + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeWarning, + Message: daemonSetOrphanedWarning, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-skip-daemonset-pod", + }, + }, + // Skip this DaemonSet Pod. + Status: PodDeleteStatus{ + Delete: false, + Reason: PodDeleteStatusTypeWarning, + Message: daemonSetWarning, + }, + }, + }}, + }, + { + name: "mirrorPodFilter", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-mirror-pod", + Annotations: map[string]string{ + corev1.MirrorPodAnnotationKey: "some-value", + }, + }, + }, + }, + wantPodDeleteList: PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-mirror-pod", + }, + }, + // Skip this Pod because it is a mirror pod. 
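+ // Mirror Pods are the API representation of static Pods managed directly by the kubelet and cannot be removed via eviction.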
+ Status: PodDeleteStatus{ + Delete: false, + Reason: PodDeleteStatusTypeSkip, + }, + }, + }}, + }, + { + name: "localStorageFilter", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-pod-without-local-storage", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-succeeded-pod-with-local-storage", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "empty-dir", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-delete-running-pod-with-local-storage", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "empty-dir", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + wantPodDeleteList: PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-pod-without-local-storage", + }, + }, + // Delete regular Pod without local storage. + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-succeeded-pod-with-local-storage", + }, + }, + // Delete succeeded Pod with local storage. + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-delete-running-pod-with-local-storage", + }, + }, + // Delete running Pod with local storage. 
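+ // Eviction is still triggered, but flagged with localStorageWarning because the Pod's emptyDir data will be lost.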
+ Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeWarning, + Message: localStorageWarning, + }, + }, + }}, + }, + { + name: "unreplicatedFilter", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-succeeded-pod", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-running-deployment-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-delete-running-standalone-pod", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + wantPodDeleteList: PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-succeeded-pod", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-running-deployment-pod", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-delete-running-standalone-pod", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeWarning, + Message: unmanagedWarning, + }, + }, + }}, + }, + { + name: "warnings from multiple filters", + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-multiple-warnings", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + Name: "daemonset-does-not-exist", + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "empty-dir", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + wantPodDeleteList: PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-delete-multiple-warnings", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeWarning, + Message: daemonSetOrphanedWarning + ", " + localStorageWarning, + }, + }, + }}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + // Setting NodeName here to avoid noise in the table above. + for i := range tt.pods { + tt.pods[i].Spec.NodeName = "node-1" + } + + var objs []client.Object + for _, o := range tt.pods { + objs = append(objs, o) + } + objs = append(objs, &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "daemonset-does-exist", + }, + }) + + fakeClient := fake.NewClientBuilder(). + WithObjects(objs...). + WithIndex(&corev1.Pod{}, "spec.nodeName", podByNodeName). + Build() + drainer := &Helper{ + Client: fakeClient, + SkipWaitForDeleteTimeoutSeconds: 10, + } + + gotPodDeleteList, err := drainer.GetPodsForEviction(context.Background(), "node-1") + g.Expect(err).ToNot(HaveOccurred()) + // Cleanup for easier diff. 
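+ // Only the Pod names are relevant for the comparison below.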
+ for i, pd := range gotPodDeleteList.items { + gotPodDeleteList.items[i].Pod = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: pd.Pod.Name, + }, + } + } + g.Expect(gotPodDeleteList.items).To(BeComparableTo(tt.wantPodDeleteList.items)) + }) + } +} + +func TestEvictPods(t *testing.T) { + tests := []struct { + name string + podDeleteList *PodDeleteList + wantEvictionResult EvictionResult + }{ + { + name: "EvictPods correctly", + podDeleteList: &PodDeleteList{items: []PodDelete{ + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-ignored", + }, + }, + Status: PodDeleteStatus{ + Delete: false, // Will be skipped because Delete is set to false. + Reason: PodDeleteStatusTypeWarning, + Message: daemonSetWarning, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-deletionTimestamp-set", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-to-trigger-eviction-pod-not-found", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-2", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-1", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-namespace-terminating", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + { + Pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-7-to-trigger-eviction-some-other-error", + }, + }, + Status: PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + }, + }, + }}, + wantEvictionResult: EvictionResult{ + PodsIgnored: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-ignored", + }, + }, + }, + PodsDeletionTimestampSet: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-deletionTimestamp-set", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully", + }, + }, + }, + PodsNotFound: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-to-trigger-eviction-pod-not-found", + }, + }, + }, + PodsFailedEviction: map[string][]*corev1.Pod{ + "Cannot evict pod as it would violate the pod's disruption budget. 
The disruption budget pod-5-pdb needs 3 healthy pods and has 2 currently": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-2", + }, + }, + }, + "Cannot evict pod from terminating namespace: unable to create eviction (kube-controller-manager should set deletionTimestamp)": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-namespace-terminating", + }, + }, + }, + "some other error": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-7-to-trigger-eviction-some-other-error", + }, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + fakeClient := fake.NewClientBuilder().WithObjects().Build() + + podResource := schema.GroupResource{Group: corev1.GroupName, Resource: "pods"} + + fakeClient = interceptor.NewClient(fakeClient, interceptor.Funcs{ + SubResourceCreate: func(_ context.Context, _ client.Client, subResourceName string, obj client.Object, _ client.Object, _ ...client.SubResourceCreateOption) error { + g.Expect(subResourceName).To(Equal("eviction")) + switch name := obj.GetName(); name { + case "pod-3-to-trigger-eviction-successfully": + return nil // Successful eviction. + case "pod-4-to-trigger-eviction-pod-not-found": + return apierrors.NewNotFound(podResource, name) + case "pod-5-to-trigger-eviction-pdb-violated-1", "pod-5-to-trigger-eviction-pdb-violated-2": + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusTooManyRequests, + Reason: metav1.StatusReasonTooManyRequests, + Message: "Cannot evict pod as it would violate the pod's disruption budget.", + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{ + { + Type: "DisruptionBudget", + Message: "The disruption budget pod-5-pdb needs 3 healthy pods and has 2 currently", + }, + }, + }, + }, + } + case "pod-6-to-trigger-eviction-namespace-terminating": + return &apierrors.StatusError{ + ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Message: "pods \"pod-6-to-trigger-eviction-namespace-terminating\" is forbidden: unable to create new content in namespace test-namespace because it is being terminated", + Details: &metav1.StatusDetails{ + Name: "pod-6-to-trigger-eviction-namespace-terminating", + Kind: "pods", + Causes: []metav1.StatusCause{ + { + Type: corev1.NamespaceTerminatingCause, + Message: "namespace test-namespace is being terminated", + Field: "metadata.namespace", + }, + }, + }, + }, + } + case "pod-7-to-trigger-eviction-some-other-error": + return apierrors.NewBadRequest("some other error") + } + + g.Fail(fmt.Sprintf("eviction behavior for Pod %q not implemented", obj.GetName())) + return nil + }, + }) + + drainer := &Helper{ + Client: fakeClient, + } + + gotEvictionResult := drainer.EvictPods(context.Background(), tt.podDeleteList) + // Cleanup for easier diff. 
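+ // Strip everything except the name (e.g. the deletionTimestamp) so the comparison below only checks Pod names.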
+ for i, pod := range gotEvictionResult.PodsDeletionTimestampSet { + gotEvictionResult.PodsDeletionTimestampSet[i] = &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: pod.Name, + }, + } + } + g.Expect(gotEvictionResult).To(BeComparableTo(tt.wantEvictionResult)) + }) + } +} + +func TestEvictionResult_ConditionMessage(t *testing.T) { + tests := []struct { + name string + evictionResult EvictionResult + wantConditionMessage string + }{ + { + name: "Compute no condition message correctly", + evictionResult: EvictionResult{}, // Drain completed. + wantConditionMessage: ``, + }, + { + name: "Compute short condition message correctly", + evictionResult: EvictionResult{ + PodsDeletionTimestampSet: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-deletionTimestamp-set-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully-1", + }, + }, + }, + PodsFailedEviction: map[string][]*corev1.Pod{ + "Cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-1", + }, + }, + }, + "some other error 1": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-some-other-error", + }, + }, + }, + }, + }, + wantConditionMessage: `Drain not completed yet: +* Pods with deletionTimestamp that still exist: pod-2-deletionTimestamp-set-1, pod-3-to-trigger-eviction-successfully-1 +* Pods with eviction failed: + * Cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently: pod-5-to-trigger-eviction-pdb-violated-1 + * some other error 1: pod-6-to-trigger-eviction-some-other-error`, + }, + { + name: "Compute long condition message correctly", + evictionResult: EvictionResult{ + PodsIgnored: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-ignored-should-not-be-included", + }, + }, + }, + PodsDeletionTimestampSet: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-deletionTimestamp-set-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-deletionTimestamp-set-2", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-deletionTimestamp-set-3", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully-2", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully-3-should-not-be-included", // only first 5. + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-successfully-4-should-not-be-included", // only first 5. + }, + }, + }, + PodsNotFound: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-to-trigger-eviction-pod-not-found-should-not-be-included", + }, + }, + }, + PodsFailedEviction: map[string][]*corev1.Pod{ + "Cannot evict pod as it would violate the pod's disruption budget. 
The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-2", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-3", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-4-should-not-be-included", // only first 3 + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-5-should-not-be-included", // only first 3 + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-pdb-violated-6-should-not-be-included", // only first 3 + }, + }, + }, + "some other error 1": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 2": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-7-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 3": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-8-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 4": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-9-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 5 should not be included": { // only first 5 + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-10-to-trigger-eviction-some-other-error", + }, + }, + }, + }, + }, + wantConditionMessage: `Drain not completed yet: +* Pods with deletionTimestamp that still exist: pod-2-deletionTimestamp-set-1, pod-2-deletionTimestamp-set-2, pod-2-deletionTimestamp-set-3, pod-3-to-trigger-eviction-successfully-1, pod-3-to-trigger-eviction-successfully-2, ... (2 more) +* Pods with eviction failed: + * Cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently: pod-5-to-trigger-eviction-pdb-violated-1, pod-5-to-trigger-eviction-pdb-violated-2, pod-5-to-trigger-eviction-pdb-violated-3, ... (3 more) + * some other error 1: pod-6-to-trigger-eviction-some-other-error + * some other error 2: pod-7-to-trigger-eviction-some-other-error + * some other error 3: pod-8-to-trigger-eviction-some-other-error + * some other error 4: pod-9-to-trigger-eviction-some-other-error + * ... 
(1 more error applying to 1 Pod)`, + }, + { + name: "Compute long condition message correctly with more skipped errors", + evictionResult: EvictionResult{ + PodsFailedEviction: map[string][]*corev1.Pod{ + "some other error 1": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 2": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 3": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-3-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 4": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 5": { + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-5-to-trigger-eviction-some-other-error", + }, + }, + }, + "some other error 6 should not be included": { // only first 5 + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-some-other-error-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-some-other-error-2", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-6-to-trigger-eviction-some-other-error-3", + }, + }, + }, + "some other error 7 should not be included": { // only first 5 + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-7-to-trigger-eviction-some-other-error", + }, + }, + }, + }, + }, + wantConditionMessage: `Drain not completed yet: +* Pods with eviction failed: + * some other error 1: pod-1-to-trigger-eviction-some-other-error + * some other error 2: pod-2-to-trigger-eviction-some-other-error + * some other error 3: pod-3-to-trigger-eviction-some-other-error + * some other error 4: pod-4-to-trigger-eviction-some-other-error + * some other error 5: pod-5-to-trigger-eviction-some-other-error + * ... (2 more errors applying to 4 Pods)`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(tt.evictionResult.ConditionMessage()).To(Equal(tt.wantConditionMessage)) + }) + } +} + +func podByNodeName(o client.Object) []string { + pod, ok := o.(*corev1.Pod) + if !ok { + panic(fmt.Sprintf("Expected a Pod but got a %T", o)) + } + + if pod.Spec.NodeName == "" { + return nil + } + + return []string{pod.Spec.NodeName} +} diff --git a/internal/controllers/machine/drain/filters.go b/internal/controllers/machine/drain/filters.go new file mode 100644 index 000000000000..1851bd453104 --- /dev/null +++ b/internal/controllers/machine/drain/filters.go @@ -0,0 +1,232 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package drain + +import ( + "context" + "fmt" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Note: This file is still mostly kept in sync with: https://github.com/kubernetes/kubernetes/blob/v1.31.0/staging/src/k8s.io/kubectl/pkg/drain/filters.go +// Minor modifications have been made to drop branches that are never used in Cluster API and to use the +// controller-runtime Client. + +const ( + daemonSetOrphanedWarning = "evicting orphaned DaemonSet-managed Pod" + daemonSetWarning = "ignoring DaemonSet-managed Pod" + localStorageWarning = "evicting Pod with local storage" + unmanagedWarning = "evicting Pod that have no controller" +) + +// PodDelete informs filtering logic whether a pod should be deleted or not. +type PodDelete struct { + Pod *corev1.Pod + Status PodDeleteStatus +} + +// PodDeleteList is a wrapper around []PodDelete. +type PodDeleteList struct { + items []PodDelete +} + +// Pods returns a list of Pods that have to go away before the Node can be considered completely drained. +func (l *PodDeleteList) Pods() []*corev1.Pod { + pods := []*corev1.Pod{} + for _, i := range l.items { + if i.Status.Delete { + pods = append(pods, i.Pod) + } + } + return pods +} + +func (l *PodDeleteList) errors() []error { + failedPods := make(map[string][]string) + for _, i := range l.items { + if i.Status.Reason == PodDeleteStatusTypeError { + msg := i.Status.Message + if msg == "" { + msg = "unexpected error" + } + failedPods[msg] = append(failedPods[msg], fmt.Sprintf("%s/%s", i.Pod.Namespace, i.Pod.Name)) + } + } + errs := make([]error, 0, len(failedPods)) + for msg, pods := range failedPods { + errs = append(errs, fmt.Errorf("cannot evict %s: %s", msg, strings.Join(pods, ", "))) + } + return errs +} + +// PodDeleteStatus informs filters if a pod should be deleted. +type PodDeleteStatus struct { + // Delete means that this Pod has to go away before the Node can be considered completely drained.. + Delete bool + Reason string + Message string +} + +// PodFilter takes a pod and returns a PodDeleteStatus. +type PodFilter func(context.Context, *corev1.Pod) PodDeleteStatus + +const ( + // PodDeleteStatusTypeOkay is "Okay". + PodDeleteStatusTypeOkay = "Okay" + // PodDeleteStatusTypeSkip is "Skip". + PodDeleteStatusTypeSkip = "Skip" + // PodDeleteStatusTypeWarning is "Warning". + PodDeleteStatusTypeWarning = "Warning" + // PodDeleteStatusTypeError is "Error". + PodDeleteStatusTypeError = "Error" +) + +// MakePodDeleteStatusOkay is a helper method to return the corresponding PodDeleteStatus. +func MakePodDeleteStatusOkay() PodDeleteStatus { + return PodDeleteStatus{ + Delete: true, + Reason: PodDeleteStatusTypeOkay, + } +} + +// MakePodDeleteStatusSkip is a helper method to return the corresponding PodDeleteStatus. +func MakePodDeleteStatusSkip() PodDeleteStatus { + return PodDeleteStatus{ + Delete: false, + Reason: PodDeleteStatusTypeSkip, + } +} + +// MakePodDeleteStatusWithWarning is a helper method to return the corresponding PodDeleteStatus. +func MakePodDeleteStatusWithWarning(del bool, message string) PodDeleteStatus { + return PodDeleteStatus{ + Delete: del, + Reason: PodDeleteStatusTypeWarning, + Message: message, + } +} + +// MakePodDeleteStatusWithError is a helper method to return the corresponding PodDeleteStatus. 
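+// Filters use it when a check fails unexpectedly, e.g. when the owning DaemonSet cannot be retrieved;
+// such Pods surface as errors via PodDeleteList.errors().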
+func MakePodDeleteStatusWithError(message string) PodDeleteStatus { + return PodDeleteStatus{ + Delete: false, + Reason: PodDeleteStatusTypeError, + Message: message, + } +} + +// The filters are applied in a specific order. +func (d *Helper) makeFilters() []PodFilter { + return []PodFilter{ + d.skipDeletedFilter, + d.daemonSetFilter, + d.mirrorPodFilter, + d.localStorageFilter, + d.unreplicatedFilter, + } +} + +func hasLocalStorage(pod *corev1.Pod) bool { + for _, volume := range pod.Spec.Volumes { + if volume.EmptyDir != nil { + return true + } + } + + return false +} + +func (d *Helper) daemonSetFilter(ctx context.Context, pod *corev1.Pod) PodDeleteStatus { + // Note that we return false in cases where the pod is DaemonSet managed, + // regardless of flags. + // + // The exception is for pods that are orphaned (the referencing + // management resource - including DaemonSet - is not found). + // Such pods will be deleted. + controllerRef := metav1.GetControllerOf(pod) + if controllerRef == nil || controllerRef.Kind != appsv1.SchemeGroupVersion.WithKind("DaemonSet").Kind { + return MakePodDeleteStatusOkay() + } + // Any finished pod can be removed. + if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return MakePodDeleteStatusOkay() + } + + if err := d.Client.Get(ctx, client.ObjectKey{Namespace: pod.Namespace, Name: controllerRef.Name}, &appsv1.DaemonSet{}); err != nil { + // remove orphaned pods with a warning + if apierrors.IsNotFound(err) { + return MakePodDeleteStatusWithWarning(true, daemonSetOrphanedWarning) + } + + return MakePodDeleteStatusWithError(err.Error()) + } + + return MakePodDeleteStatusWithWarning(false, daemonSetWarning) +} + +func (d *Helper) mirrorPodFilter(_ context.Context, pod *corev1.Pod) PodDeleteStatus { + if _, found := pod.ObjectMeta.Annotations[corev1.MirrorPodAnnotationKey]; found { + return MakePodDeleteStatusSkip() + } + return MakePodDeleteStatusOkay() +} + +func (d *Helper) localStorageFilter(_ context.Context, pod *corev1.Pod) PodDeleteStatus { + if !hasLocalStorage(pod) { + return MakePodDeleteStatusOkay() + } + // Any finished pod can be removed. 
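+ // I.e. Pods in phase Succeeded or Failed no longer run containers, so losing their local storage is not a concern.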
+ if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return MakePodDeleteStatusOkay() + } + + return MakePodDeleteStatusWithWarning(true, localStorageWarning) +} + +func (d *Helper) unreplicatedFilter(_ context.Context, pod *corev1.Pod) PodDeleteStatus { + // any finished pod can be removed + if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed { + return MakePodDeleteStatusOkay() + } + + controllerRef := metav1.GetControllerOf(pod) + if controllerRef != nil { + return MakePodDeleteStatusOkay() + } + + return MakePodDeleteStatusWithWarning(true, unmanagedWarning) +} + +func shouldSkipPod(pod *corev1.Pod, skipDeletedTimeoutSeconds int) bool { + return skipDeletedTimeoutSeconds > 0 && + !pod.ObjectMeta.DeletionTimestamp.IsZero() && + int(time.Since(pod.ObjectMeta.GetDeletionTimestamp().Time).Seconds()) > skipDeletedTimeoutSeconds +} + +func (d *Helper) skipDeletedFilter(_ context.Context, pod *corev1.Pod) PodDeleteStatus { + if shouldSkipPod(pod, d.SkipWaitForDeleteTimeoutSeconds) { + return MakePodDeleteStatusSkip() + } + return MakePodDeleteStatusOkay() +} diff --git a/internal/controllers/machine/drain/filters_test.go b/internal/controllers/machine/drain/filters_test.go new file mode 100644 index 000000000000..a53beb6b4a3f --- /dev/null +++ b/internal/controllers/machine/drain/filters_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package drain + +import ( + "context" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestSkipDeletedFilter(t *testing.T) { + tCases := []struct { + timeStampAgeSeconds int + skipWaitForDeleteTimeoutSeconds int + expectedDelete bool + }{ + { + timeStampAgeSeconds: 0, + skipWaitForDeleteTimeoutSeconds: 20, + expectedDelete: true, + }, + { + timeStampAgeSeconds: 1, + skipWaitForDeleteTimeoutSeconds: 20, + expectedDelete: true, + }, + { + timeStampAgeSeconds: 100, + skipWaitForDeleteTimeoutSeconds: 20, + expectedDelete: false, + }, + } + for i, tc := range tCases { + h := &Helper{ + SkipWaitForDeleteTimeoutSeconds: tc.skipWaitForDeleteTimeoutSeconds, + } + pod := corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "default", + }, + } + + if tc.timeStampAgeSeconds > 0 { + dTime := &metav1.Time{Time: time.Now().Add(time.Duration(tc.timeStampAgeSeconds) * time.Second * -1)} + pod.ObjectMeta.SetDeletionTimestamp(dTime) + } + + podDeleteStatus := h.skipDeletedFilter(context.Background(), &pod) + if podDeleteStatus.Delete != tc.expectedDelete { + t.Errorf("test %v: unexpected podDeleteStatus.delete; actual %v; expected %v", i, podDeleteStatus.Delete, tc.expectedDelete) + } + } +} diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 2df9172976eb..c1d82cdc9a58 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -29,11 +29,8 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - kubedrain "k8s.io/kubectl/pkg/drain" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -47,6 +44,7 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/cluster-api/controllers/remote" + "sigs.k8s.io/cluster-api/internal/controllers/machine/drain" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" @@ -57,6 +55,10 @@ import ( "sigs.k8s.io/cluster-api/util/predicates" ) +const ( + drainRetryInterval = time.Duration(20) * time.Second +) + var ( errNilNodeRef = errors.New("noderef is nil") errLastControlPlaneNode = errors.New("last control plane member") @@ -83,9 +85,6 @@ type Reconciler struct { // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string - // NodeDrainClientTimeout timeout of the client used for draining nodes. - NodeDrainClientTimeout time.Duration - controller controller.Controller recorder record.EventRecorder externalTracker external.ObjectTracker @@ -94,6 +93,7 @@ type Reconciler struct { // during a single reconciliation. 
nodeDeletionRetryTimeout time.Duration ssaCache ssa.Cache + drainCache drain.Cache } func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { @@ -151,6 +151,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt Cache: mgr.GetCache(), } r.ssaCache = ssa.NewCache() + r.drainCache = drain.NewCache() return nil } @@ -377,7 +378,6 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return ctrl.Result{}, err } - log.Info("Draining node", "Node", klog.KRef("", m.Status.NodeRef.Name)) // The DrainingSucceededCondition never exists before the node is drained for the first time, // so its transition time can be used to record the first time draining. // This `if` condition prevents the transition time to be changed more than once. @@ -389,12 +389,15 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return ctrl.Result{}, errors.Wrap(err, "failed to patch Machine") } - if result, err := r.drainNode(ctx, cluster, m.Status.NodeRef.Name); !result.IsZero() || err != nil { - if err != nil { - conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDrainNode", "error draining Machine's node %q: %v", m.Status.NodeRef.Name, err) - } - return result, err + result, err := r.drainNode(ctx, cluster, m, m.Status.NodeRef.Name) + if err != nil { + conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedDrainNode", "error draining Machine's node %q: %v", m.Status.NodeRef.Name, err) + return ctrl.Result{}, err + } + if !result.IsZero() { + // Note: For non-error cases where the drain is not completed yet the DrainingSucceeded condition is updated in drainNode. 
+ return result, nil } conditions.MarkTrue(m, clusterv1.DrainingSucceededCondition) @@ -417,7 +420,6 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error waiting for node volumes detaching, Machine's node %q: %v", m.Status.NodeRef.Name, err) return ctrl.Result{}, err } - log.Info("Waiting for node volumes to be detached", "Node", klog.KRef("", m.Status.NodeRef.Name)) return ctrl.Result{}, nil } conditions.MarkTrue(m, clusterv1.VolumeDetachSucceededCondition) @@ -643,10 +645,11 @@ func (r *Reconciler) isDeleteNodeAllowed(ctx context.Context, cluster *clusterv1 return nil } -func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (ctrl.Result, error) { +func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, nodeName string) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName)) + ctx = ctrl.LoggerInto(ctx, log) - restConfig, err := r.Tracker.GetRESTConfig(ctx, util.ObjectKey(cluster)) + remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { if errors.Is(err, remote.ErrClusterLocked) { log.V(5).Info("Requeuing drain Node because another worker has the lock on the ClusterCacheTracker") @@ -655,46 +658,20 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, log.Error(err, "Error creating a remote client for cluster while draining Node, won't retry") return ctrl.Result{}, nil } - restConfig = rest.CopyConfig(restConfig) - restConfig.Timeout = r.NodeDrainClientTimeout - kubeClient, err := kubernetes.NewForConfig(restConfig) - if err != nil { - log.Error(err, "Error creating a remote client while deleting Machine, won't retry") - return ctrl.Result{}, nil - } - node, err := kubeClient.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) - if err != nil { + node := &corev1.Node{} + if err := remoteClient.Get(ctx, client.ObjectKey{Name: nodeName}, node); err != nil { if apierrors.IsNotFound(err) { // If an admin deletes the node directly, we'll end up here. log.Error(err, "Could not find node from noderef, it may have already been deleted") return ctrl.Result{}, nil } - return ctrl.Result{}, errors.Wrapf(err, "unable to get node %v", nodeName) - } - - drainer := &kubedrain.Helper{ - Client: kubeClient, - Ctx: ctx, - Force: true, - IgnoreAllDaemonSets: true, - DeleteEmptyDirData: true, - GracePeriodSeconds: -1, - // If a pod is not evicted in 20 seconds, retry the eviction next time the - // machine gets reconciled again (to allow other machines to be reconciled). - Timeout: 20 * time.Second, - OnPodDeletedOrEvicted: func(pod *corev1.Pod, usingEviction bool) { - verbStr := "Deleted" - if usingEviction { - verbStr = "Evicted" - } - log.Info(fmt.Sprintf("%s pod from Node", verbStr), - "Pod", klog.KObj(pod)) - }, - Out: writer{log.Info}, - ErrOut: writer{func(msg string, keysAndValues ...interface{}) { - log.Error(nil, msg, keysAndValues...) - }}, + return ctrl.Result{}, errors.Wrapf(err, "unable to get Node %s", nodeName) + } + + drainer := &drain.Helper{ + Client: remoteClient, + GracePeriodSeconds: -1, } if noderefutil.IsNodeUnreachable(node) { @@ -709,23 +686,70 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, // Override the grace period of pods to reduce the time needed to skip them. 
drainer.GracePeriodSeconds = 1 - log.V(5).Info("Node is unreachable, draining will ignore gracePeriod. PDBs are still honored.") + log.V(3).Info("Node is unreachable, draining will use 1s GracePeriodSeconds and will ignore all Pods that have a deletionTimestamp > 1s old. PDBs are still honored.") } - if err := kubedrain.RunCordonOrUncordon(drainer, node, true); err != nil { + if err := drainer.CordonNode(ctx, node); err != nil { // Machine will be re-reconciled after a cordon failure. - log.Error(err, "Cordon failed") - return ctrl.Result{}, errors.Wrapf(err, "unable to cordon node %v", node.Name) + return ctrl.Result{}, errors.Wrapf(err, "failed to cordon Node %s", node.Name) } - if err := kubedrain.RunNodeDrain(drainer, node.Name); err != nil { - // Machine will be re-reconciled after a drain failure. - log.Error(err, "Drain failed, retry in 20s") - return ctrl.Result{RequeueAfter: 20 * time.Second}, nil + podDeleteList, err := drainer.GetPodsForEviction(ctx, nodeName) + if err != nil { + return ctrl.Result{}, err } - log.Info("Drain successful") - return ctrl.Result{}, nil + podsToBeDrained := podDeleteList.Pods() + + if len(podsToBeDrained) == 0 { + log.Info("Drain completed, no Pods to drain on the Node") + return ctrl.Result{}, nil + } + + // Check drain cache to ensure we won't retry drain before drainRetryInterval. + // Note: This is intentionally only done if we have Pods to evict, because otherwise + // even the "no-op" drain would be requeued. + if cacheEntry, ok := r.drainCache.Has(client.ObjectKeyFromObject(machine)); ok { + if requeueAfter, requeue := shouldRequeueDrain(time.Now(), cacheEntry.LastDrain); requeue { + log.Info(fmt.Sprintf("Requeuing in %s because there already was a drain in the last %s", requeueAfter, drainRetryInterval)) + return ctrl.Result{RequeueAfter: requeueAfter}, nil + } + } + + log.Info("Draining Node") + + evictionResult := drainer.EvictPods(ctx, podDeleteList) + + // Add entry to the drain cache so we won't retry drain before drainRetryInterval. + r.drainCache.Add(drain.CacheEntry{ + Machine: client.ObjectKeyFromObject(machine), + LastDrain: time.Now(), + }) + + if evictionResult.DrainCompleted() { + log.Info("Drain completed, remaining Pods on the Node have been evicted") + return ctrl.Result{}, nil + } + + conditions.MarkFalse(machine, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, evictionResult.ConditionMessage()) + podsFailedEviction := []*corev1.Pod{} + for _, p := range evictionResult.PodsFailedEviction { + podsFailedEviction = append(podsFailedEviction, p...) + } + log.Info(fmt.Sprintf("Drain not completed yet, requeuing in %s", drainRetryInterval), + "podsFailedEviction", drain.PodListToString(podsFailedEviction, 5), + "podsWithDeletionTimestamp", drain.PodListToString(evictionResult.PodsDeletionTimestampSet, 5), + ) + return ctrl.Result{RequeueAfter: drainRetryInterval}, nil +} + +func shouldRequeueDrain(now time.Time, lastDrain time.Time) (time.Duration, bool) { + timeSinceLastDrain := now.Sub(lastDrain) + if timeSinceLastDrain < drainRetryInterval { + return drainRetryInterval - timeSinceLastDrain, true + } + + return time.Duration(0), false } // shouldWaitForNodeVolumes returns true if node status still have volumes attached and the node is reachable @@ -735,6 +759,7 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, // so after node draining we need to check if all volumes are detached before deleting the node. 
func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clusterv1.Cluster, nodeName string) (bool, error) { log := ctrl.LoggerFrom(ctx, "Node", klog.KRef("", nodeName)) + ctx = ctrl.LoggerInto(ctx, log) remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster)) if err != nil { @@ -754,11 +779,16 @@ func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, cluster *clus // If a node is unreachable, we can't detach the volume. // We need to skip the detachment as we otherwise block deletions // of unreachable nodes when a volume is attached. - log.Info("Skipping volume detachment as node is unreachable.") + log.Info("Node is unreachable, skip waiting for volume detachment.") return false, nil } - return len(node.Status.VolumesAttached) != 0, nil + if len(node.Status.VolumesAttached) != 0 { + log.Info("Waiting for Node volumes to be detached") + return true, nil + } + + return false, nil } func (r *Reconciler) deleteNode(ctx context.Context, cluster *clusterv1.Cluster, name string) error { @@ -961,14 +991,3 @@ func (r *Reconciler) nodeToMachine(ctx context.Context, o client.Object) []recon return nil } - -// writer implements io.Writer interface as a pass-through for klog. -type writer struct { - logFunc func(msg string, keysAndValues ...interface{}) -} - -// Write passes string(p) into writer's logFunc and always returns len(p). -func (w writer) Write(p []byte) (n int, err error) { - w.logFunc(string(p)) - return len(p), nil -} diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index dc9fa0842f44..ec72dee7cc50 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -24,6 +24,7 @@ import ( "github.com/go-logr/logr" . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,6 +43,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/api/v1beta1/index" "sigs.k8s.io/cluster-api/controllers/remote" + "sigs.k8s.io/cluster-api/internal/controllers/machine/drain" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" @@ -1465,6 +1467,390 @@ func TestIsNodeDrainedAllowed(t *testing.T) { } } +func TestDrainNode(t *testing.T) { + testCluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-cluster", + }, + } + testMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + }, + } + + tests := []struct { + name string + nodeName string + node *corev1.Node + pods []*corev1.Pod + wantCondition *clusterv1.Condition + wantResult ctrl.Result + wantErr string + }{ + { + name: "Node does not exist, no-op", + nodeName: "node-does-not-exist", + }, + { + name: "Node does exist, should be cordoned", + nodeName: "node-1", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + }, + }, + { + name: "Node does exist, should stay cordoned", + nodeName: "node-1", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + }, + }, + { + name: "Node does exist, only Pods that don't have to be drained", + nodeName: "node-1", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + }, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-mirror-pod", + Annotations: map[string]string{ + corev1.MirrorPodAnnotationKey: "some-value", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-4-skip-daemonset-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "DaemonSet", + Name: "daemonset-does-exist", + Controller: ptr.To(true), + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + }, + { + name: "Node does exist, some Pods have to be drained", + nodeName: "node-1", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + }, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-mirror-pod", + Annotations: map[string]string{ + corev1.MirrorPodAnnotationKey: "some-value", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-2-delete-running-deployment-pod", + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + }, + wantResult: ctrl.Result{RequeueAfter: 20 * time.Second}, + wantCondition: &clusterv1.Condition{ + Type: clusterv1.DrainingSucceededCondition, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityInfo, + Reason: clusterv1.DrainingReason, + Message: `Drain not completed yet: +* Pods with deletionTimestamp that still exist: pod-2-delete-running-deployment-pod`, + }, + }, + { + name: "Node does exist but is unreachable, no Pods have to be drained because they all have old deletionTimestamps", + nodeName: "node-1", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ // unreachable. + { + Type: corev1.NodeReady, + Status: corev1.ConditionUnknown, + }, + }, + }, + }, + pods: []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-1-skip-pod-old-deletionTimestamp", + DeletionTimestamp: &metav1.Time{Time: time.Now().Add(time.Duration(1) * time.Hour * -1)}, + Finalizers: []string{"block-deletion"}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + // Setting NodeName here to avoid noise in the table above. + for i := range tt.pods { + tt.pods[i].Spec.NodeName = tt.nodeName + } + + // Making a copy because drainNode will modify the Machine. + testMachine := testMachine.DeepCopy() + + var objs []client.Object + objs = append(objs, testCluster, testMachine) + c := fake.NewClientBuilder(). + WithObjects(objs...). + Build() + + var remoteObjs []client.Object + if tt.node != nil { + remoteObjs = append(remoteObjs, tt.node) + } + for _, p := range tt.pods { + remoteObjs = append(remoteObjs, p) + } + remoteObjs = append(remoteObjs, &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "daemonset-does-exist", + }, + }) + remoteClient := fake.NewClientBuilder(). + WithIndex(&corev1.Pod{}, "spec.nodeName", podByNodeName). + WithObjects(remoteObjs...). + Build() + + tracker := remote.NewTestClusterCacheTracker(ctrl.Log, c, remoteClient, fakeScheme, client.ObjectKeyFromObject(testCluster)) + r := &Reconciler{ + Client: c, + Tracker: tracker, + drainCache: drain.NewCache(), + } + + res, err := r.drainNode(ctx, testCluster, testMachine, tt.nodeName) + g.Expect(res).To(BeComparableTo(tt.wantResult)) + if tt.wantErr == "" { + g.Expect(err).ToNot(HaveOccurred()) + } else { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(BeComparableTo(tt.wantErr)) + } + + gotCondition := conditions.Get(testMachine, clusterv1.DrainingSucceededCondition) + if tt.wantCondition == nil { + g.Expect(gotCondition).To(BeNil()) + } else { + g.Expect(gotCondition).ToNot(BeNil()) + // Cleanup for easier comparison + gotCondition.LastTransitionTime = metav1.Time{} + g.Expect(gotCondition).To(BeComparableTo(tt.wantCondition)) + } + + // If there is a Node it should be cordoned. + if tt.node != nil { + gotNode := &corev1.Node{} + g.Expect(remoteClient.Get(ctx, client.ObjectKeyFromObject(tt.node), gotNode)).To(Succeed()) + g.Expect(gotNode.Spec.Unschedulable).To(BeTrue()) + } + }) + } +} + +func TestDrainNode_withCaching(t *testing.T) { + testCluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-cluster", + }, + } + testMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-machine", + }, + } + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + } + + pods := []*corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod-delete-running-deployment-pod", + Finalizers: []string{ + // Add a finalizer so the Pod doesn't go away after eviction. 
+ "cluster.x-k8s.io/block", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Deployment", + Controller: ptr.To(true), + }, + }, + }, + Spec: corev1.PodSpec{ + NodeName: "node-1", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + }, + } + + g := NewWithT(t) + + var objs []client.Object + objs = append(objs, testCluster, testMachine) + c := fake.NewClientBuilder(). + WithObjects(objs...). + Build() + + remoteObjs := []client.Object{node} + for _, p := range pods { + remoteObjs = append(remoteObjs, p) + } + remoteClient := fake.NewClientBuilder(). + WithIndex(&corev1.Pod{}, "spec.nodeName", podByNodeName). + WithObjects(remoteObjs...). + Build() + + tracker := remote.NewTestClusterCacheTracker(ctrl.Log, c, remoteClient, fakeScheme, client.ObjectKeyFromObject(testCluster)) + drainCache := drain.NewCache() + r := &Reconciler{ + Client: c, + Tracker: tracker, + drainCache: drainCache, + } + + // The first reconcile will cordon the Node, evict the one Pod running on the Node and then requeue. + res, err := r.drainNode(ctx, testCluster, testMachine, "node-1") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res).To(BeComparableTo(ctrl.Result{RequeueAfter: drainRetryInterval})) + // Condition should report the one Pod that has been evicted. + gotCondition := conditions.Get(testMachine, clusterv1.DrainingSucceededCondition) + g.Expect(gotCondition).ToNot(BeNil()) + // Cleanup for easier comparison + gotCondition.LastTransitionTime = metav1.Time{} + g.Expect(gotCondition).To(BeComparableTo(&clusterv1.Condition{ + Type: clusterv1.DrainingSucceededCondition, + Status: corev1.ConditionFalse, + Severity: clusterv1.ConditionSeverityInfo, + Reason: clusterv1.DrainingReason, + Message: `Drain not completed yet: +* Pods with deletionTimestamp that still exist: pod-delete-running-deployment-pod`, + })) + // Node should be cordoned. + gotNode := &corev1.Node{} + g.Expect(remoteClient.Get(ctx, client.ObjectKeyFromObject(node), gotNode)).To(Succeed()) + g.Expect(gotNode.Spec.Unschedulable).To(BeTrue()) + + // Drain cache should have an entry for the Machine + gotEntry1, ok := drainCache.Has(client.ObjectKeyFromObject(testMachine)) + g.Expect(ok).To(BeTrue()) + + // The second reconcile will just requeue with a duration < drainRetryInterval because there already was + // one drain within the drainRetryInterval. 
+ res, err = r.drainNode(ctx, testCluster, testMachine, "node-1") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.RequeueAfter).To(BeNumerically(">", time.Duration(0))) + g.Expect(res.RequeueAfter).To(BeNumerically("<", drainRetryInterval)) + + // LastDrain in the drain cache entry should not have changed + gotEntry2, ok := drainCache.Has(client.ObjectKeyFromObject(testMachine)) + g.Expect(ok).To(BeTrue()) + g.Expect(gotEntry1).To(BeComparableTo(gotEntry2)) +} + +func TestShouldRequeueDrain(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + now time.Time + lastDrain time.Time + wantRequeue bool + wantRequeueAfter time.Duration + }{ + { + name: "Requeue after 15s last drain was 5s ago (drainRetryInterval: 20s)", + now: now, + lastDrain: now.Add(-time.Duration(5) * time.Second), + wantRequeue: true, + wantRequeueAfter: time.Duration(15) * time.Second, + }, + { + name: "Don't requeue last drain was 20s ago (drainRetryInterval: 20s)", + now: now, + lastDrain: now.Add(-time.Duration(20) * time.Second), + wantRequeue: false, + wantRequeueAfter: time.Duration(0), + }, + { + name: "Don't requeue last drain was 60s ago (drainRetryInterval: 20s)", + now: now, + lastDrain: now.Add(-time.Duration(60) * time.Second), + wantRequeue: false, + wantRequeueAfter: time.Duration(0), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + gotRequeueAfter, gotRequeue := shouldRequeueDrain(tt.now, tt.lastDrain) + g.Expect(gotRequeue).To(Equal(tt.wantRequeue)) + g.Expect(gotRequeueAfter).To(Equal(tt.wantRequeueAfter)) + }) + } +} + func TestIsNodeVolumeDetachingAllowed(t *testing.T) { testCluster := &clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()}, @@ -2683,3 +3069,16 @@ func assertCondition(t *testing.T, from conditions.Getter, condition *clusterv1. } } } + +func podByNodeName(o client.Object) []string { + pod, ok := o.(*corev1.Pod) + if !ok { + panic(fmt.Sprintf("Expected a Pod but got a %T", o)) + } + + if pod.Spec.NodeName == "" { + return nil + } + + return []string{pod.Spec.NodeName} +} diff --git a/main.go b/main.go index 36ed3aea29ef..cc0c7a2bb428 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,6 @@ package main import ( "context" - "errors" "flag" "fmt" "os" @@ -27,6 +26,7 @@ import ( "time" "github.com/spf13/pflag" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -116,7 +116,6 @@ var ( machinePoolConcurrency int clusterResourceSetConcurrency int machineHealthCheckConcurrency int - nodeDrainClientTimeout time.Duration useDeprecatedInfraMachineNaming bool ) @@ -220,9 +219,6 @@ func InitFlags(fs *pflag.FlagSet) { fs.IntVar(&clusterCacheTrackerClientBurst, "clustercachetracker-client-burst", 30, "Maximum number of queries that should be allowed in one burst from the cluster cache tracker clients to the Kubernetes API server of workload clusters.") - fs.DurationVar(&nodeDrainClientTimeout, "node-drain-client-timeout-duration", time.Second*10, - "The timeout of the client used for draining nodes. 
Defaults to 10s") - fs.IntVar(&webhookPort, "webhook-port", 9443, "Webhook Server port") @@ -275,11 +271,6 @@ func main() { restConfig.Burst = restConfigBurst restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) - if nodeDrainClientTimeout <= 0 { - setupLog.Error(errors.New("node drain client timeout must be greater than zero"), "unable to start manager") - os.Exit(1) - } - minVer := version.MinimumKubernetesVersion if feature.Gates.Enabled(feature.ClusterTopology) { minVer = version.MinimumKubernetesVersionClusterTopology @@ -414,9 +405,17 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, watchNamespaces map SecretCachingClient: secretCachingClient, ControllerName: controllerName, Log: &ctrl.Log, - Indexes: []remote.Index{remote.NodeProviderIDIndex}, - ClientQPS: clusterCacheTrackerClientQPS, - ClientBurst: clusterCacheTrackerClientBurst, + ClientUncachedObjects: []client.Object{ + // Don't cache ConfigMaps & Secrets. + &corev1.ConfigMap{}, + &corev1.Secret{}, + // Don't cache Pods & DaemonSets (we get/list them e.g. during drain). + &corev1.Pod{}, + &appsv1.DaemonSet{}, + }, + Indexes: []remote.Index{remote.NodeProviderIDIndex}, + ClientQPS: clusterCacheTrackerClientQPS, + ClientBurst: clusterCacheTrackerClientBurst, }, ) if err != nil { @@ -534,11 +533,10 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, watchNamespaces map os.Exit(1) } if err := (&controllers.MachineReconciler{ - Client: mgr.GetClient(), - APIReader: mgr.GetAPIReader(), - Tracker: tracker, - WatchFilterValue: watchFilterValue, - NodeDrainClientTimeout: nodeDrainClientTimeout, + Client: mgr.GetClient(), + APIReader: mgr.GetAPIReader(), + Tracker: tracker, + WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, concurrency(machineConcurrency)); err != nil { setupLog.Error(err, "Unable to create controller", "controller", "Machine") os.Exit(1) diff --git a/test/go.mod b/test/go.mod index dce60eaa2c5e..5ba40c659f1d 100644 --- a/test/go.mod +++ b/test/go.mod @@ -39,7 +39,6 @@ require ( ) require ( - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect @@ -55,7 +54,6 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect @@ -65,11 +63,9 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect - github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -80,7 +76,6 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/btree v1.0.1 // indirect github.com/google/cel-go v0.20.1 // indirect 
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-github/v53 v53.2.0 // indirect @@ -88,10 +83,8 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -100,20 +93,16 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.4.0 // indirect - github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect @@ -121,10 +110,8 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect @@ -137,7 +124,6 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/xlab/treeprint v1.2.0 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect @@ -149,7 +135,6 @@ require ( go.opentelemetry.io/otel/sdk v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect - go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.26.0 // indirect @@ -171,13 +156,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.4.0 // indirect - k8s.io/cli-runtime v0.31.0 // indirect k8s.io/cluster-bootstrap v0.31.0 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // 
indirect - k8s.io/kubectl v0.31.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.17.3 // indirect - sigs.k8s.io/kustomize/kyaml v0.17.2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/test/go.sum b/test/go.sum index b8380b319af9..9729ca7be36b 100644 --- a/test/go.sum +++ b/test/go.sum @@ -1,7 +1,5 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -37,15 +35,8 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= -github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= @@ -65,8 +56,6 @@ github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= -github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -85,14 +74,10 @@ github.com/dustin/go-humanize v1.0.1 
h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flatcar/ignition v0.36.2 h1:xGHgScUe0P4Fkprjqv7L2CE58emiQgP833OCCn9z2v4= @@ -103,8 +88,6 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -130,18 +113,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= @@ -151,11 +124,6 @@ github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -172,16 +140,12 @@ github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQu github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -216,8 +180,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text 
v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -227,8 +189,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= -github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -245,8 +205,6 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= -github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -266,8 +224,6 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -276,7 +232,6 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.1 
h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= @@ -285,14 +240,11 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -323,7 +275,6 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -349,8 +300,6 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= -github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -390,8 +339,6 @@ go.opentelemetry.io/otel/trace v1.28.0 
h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= -go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -406,18 +353,11 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -429,18 +369,14 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -449,10 +385,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -460,7 +394,6 @@ golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= @@ -473,11 +406,7 @@ golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -491,30 +420,14 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.34.2 
h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -539,8 +452,6 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= @@ -549,8 +460,6 @@ k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= -k8s.io/cli-runtime v0.31.0 h1:V2Q1gj1u3/WfhD475HBQrIYsoryg/LrhhK4RwpN+DhA= -k8s.io/cli-runtime v0.31.0/go.mod h1:vg3H94wsubuvWfSmStDbekvbla5vFGC+zLWqcf+bGDw= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/cluster-bootstrap v0.31.0 h1:jj5t1PArBPddvDypdNpzqnZQ/+qnGxpJuTF7SX05h1Y= @@ -561,8 +470,6 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.31.0 h1:kANwAAPVY02r4U4jARP/C+Q1sssCcN/1p9Nk+7BQKVg= -k8s.io/kubectl v0.31.0/go.mod h1:pB47hhFypGsaHAPjlwrNbvhXgmuAr01ZBvAIIUaI8d4= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= @@ -573,10 +480,6 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.24.0 h1:g4y4eu0qa+SCeKESLpESgMmVFBebL0BDa6f777OIWrg= sigs.k8s.io/kind v0.24.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw= -sigs.k8s.io/kustomize/api v0.17.3 h1:6GCuHSsxq7fN5yhF2XrC+AAr8gxQwhexgHflOAD/JJU= -sigs.k8s.io/kustomize/api v0.17.3/go.mod h1:TuDH4mdx7jTfK61SQ/j1QZM/QWR+5rmEiNjvYlhzFhc= -sigs.k8s.io/kustomize/kyaml v0.17.2 h1:+AzvoJUY0kq4QAhH/ydPHHMRLijtUKiyVyh7fOSshr0= -sigs.k8s.io/kustomize/kyaml v0.17.2/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 
h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
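
Reviewer note: the sketch below is not part of the patch. It re-implements the unexported shouldRequeueDrain helper and the 20s drainRetryInterval from machine_controller.go as a standalone Go program (the package main wrapper is only for illustration), to show how the time since the last drain attempt translates into the RequeueAfter values drainNode returns.

package main

import (
	"fmt"
	"time"
)

// drainRetryInterval mirrors the constant added to machine_controller.go.
const drainRetryInterval = 20 * time.Second

// shouldRequeueDrain re-implements the unexported helper from the patch:
// if the previous drain attempt was less than drainRetryInterval ago,
// requeue for the remaining time; otherwise drain again immediately.
func shouldRequeueDrain(now, lastDrain time.Time) (time.Duration, bool) {
	timeSinceLastDrain := now.Sub(lastDrain)
	if timeSinceLastDrain < drainRetryInterval {
		return drainRetryInterval - timeSinceLastDrain, true
	}
	return 0, false
}

func main() {
	now := time.Now()

	// Last drain 5s ago: requeue after the remaining 15s
	// (the first case covered by TestShouldRequeueDrain).
	if after, requeue := shouldRequeueDrain(now, now.Add(-5*time.Second)); requeue {
		fmt.Printf("requeue after %s\n", after) // prints "requeue after 15s"
	}

	// Last drain 20s or more ago: the interval has passed, drain again now.
	if _, requeue := shouldRequeueDrain(now, now.Add(-60*time.Second)); !requeue {
		fmt.Println("drain again immediately")
	}
}

In the controller, the remaining duration is returned as ctrl.Result{RequeueAfter: requeueAfter} before any new eviction is issued, and the drain cache entry (keyed by the Machine's namespaced name) is only refreshed after an actual EvictPods call, matching the behaviour exercised by TestDrainNode_withCaching above.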